Version 3.7.4

Proper "libv8.so.3.7.4" SONAME for Linux shared library (issue 1786).

Fix Harmony sets and maps to allow null and undefined as keys (still hidden behind the --harmony flag) (issue 1622).
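
A minimal sketch of the fixed behavior, as one might check it in a d8 shell built from this version (requires --harmony; the Harmony collection API here is the early draft version, so details are illustrative):

  var s = new Set();
  s.add(null);
  s.add(undefined);
  print(s.has(null));       // true
  print(s.has(undefined));  // true

  var m = new Map();
  m.set(null, "a");
  m.set(undefined, "b");
  print(m.get(null));       // "a"
  print(m.get(undefined));  // "b"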

Implement VirtualMemory on FreeBSD to fix the build (issue 1807).

Enable VFP instructions for Android.

Fix error handling in Date.prototype.toISOString (issue 1792).
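
ES5.1 requires Date.prototype.toISOString to throw a RangeError for an invalid time value rather than format a bogus string; a minimal check (runnable in d8):

  var d = new Date(NaN);             // invalid time value
  try {
    d.toISOString();
    print("unexpected: no error");
  } catch (e) {
    print(e instanceof RangeError);  // true
  }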

Bug fixes and performance improvements for all platforms.

Not officially supported but noteworthy: Crankshaft for MIPS :-)

git-svn-id: http://v8.googlecode.com/svn/trunk@9889 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index be4a8f0..42de36b 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -172,6 +172,9 @@
     mips/frames-mips.cc
     mips/full-codegen-mips.cc
     mips/ic-mips.cc
+    mips/lithium-codegen-mips.cc
+    mips/lithium-gap-resolver-mips.cc
+    mips/lithium-mips.cc
     mips/macro-assembler-mips.cc
     mips/regexp-macro-assembler-mips.cc
     mips/stub-cache-mips.cc
diff --git a/src/accessors.cc b/src/accessors.cc
index 02998f9..fac8ed9 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -621,8 +621,9 @@
 
       if (!frame->is_optimized()) {
         // If there is an arguments variable in the stack, we return that.
-        Handle<SerializedScopeInfo> info(function->shared()->scope_info());
-        int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
+        Handle<ScopeInfo> scope_info(function->shared()->scope_info());
+        int index = scope_info->StackSlotIndex(
+            isolate->heap()->arguments_symbol());
         if (index >= 0) {
           Handle<Object> arguments(frame->GetExpression(index), isolate);
           if (!arguments->IsArgumentsMarker()) return *arguments;
diff --git a/src/api.cc b/src/api.cc
index ac4f07f..40a89e2 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -4299,7 +4299,7 @@
 }
 
 static void* ExternalValueImpl(i::Handle<i::Object> obj) {
-  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->address());
+  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
 }
 
 
@@ -4325,7 +4325,7 @@
   if (value->IsSmi()) {
     return i::Internals::GetExternalPointerFromSmi(value);
   } else if (value->IsForeign()) {
-    return reinterpret_cast<void*>(i::Foreign::cast(value)->address());
+    return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
   } else {
     return NULL;
   }
@@ -4878,7 +4878,7 @@
 
     NeanderObject listener(i::JSObject::cast(listeners.get(i)));
     i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
-    if (callback_obj->address() == FUNCTION_ADDR(that)) {
+    if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
       listeners.set(i, isolate->heap()->undefined_value());
     }
   }
diff --git a/src/api.h b/src/api.h
index 07723cb..f41c96e 100644
--- a/src/api.h
+++ b/src/api.h
@@ -115,7 +115,8 @@
 template <typename T> static inline T ToCData(v8::internal::Object* obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return reinterpret_cast<T>(
-      reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
+      reinterpret_cast<intptr_t>(
+          v8::internal::Foreign::cast(obj)->foreign_address()));
 }
 
 
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index cb3bc88..7a541f5 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -5143,7 +5143,8 @@
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
@@ -5219,7 +5220,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5244,7 +5246,8 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
@@ -6750,84 +6753,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register receiver,
-    Register properties,
-    String* name,
-    Register scratch0) {
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // scratch0 points to properties hash.
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = scratch0;
-    // Capacity is smi 2^n.
-    __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
-    __ sub(index, index, Operand(1));
-    __ and_(index, index, Operand(
-        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
-
-    Register entity_name = scratch0;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    Register tmp = properties;
-    __ add(tmp, properties, Operand(index, LSL, 1));
-    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
-    ASSERT(!tmp.is(entity_name));
-    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
-    __ cmp(entity_name, tmp);
-    __ b(eq, done);
-
-    if (i != kInlinedProbes - 1) {
-      // Stop if found the property.
-      __ cmp(entity_name, Operand(Handle<String>(name)));
-      __ b(eq, miss);
-
-      // Check if the entry name is not a symbol.
-      __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
-      __ ldrb(entity_name,
-              FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-      __ tst(entity_name, Operand(kIsSymbolMask));
-      __ b(eq, miss);
-
-      // Restore the properties.
-      __ ldr(properties,
-             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-    }
-  }
-
-  const int spill_mask =
-      (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
-       r2.bit() | r1.bit() | r0.bit());
-
-  __ stm(db_w, sp, spill_mask);
-  __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ mov(r1, Operand(Handle<String>(name)));
-  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
-  __ tst(r0, Operand(r0));
-  __ ldm(ia_w, sp, spill_mask);
-
-  __ b(eq, done);
-  __ b(ne, miss);
-  return result;
-}
-
-
 // Probe the string dictionary in the |elements| register. Jump to the
 // |done| label if a property with the given name is found. Jump to
 // the |miss| label otherwise.
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 647fc8d..b846864 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -807,17 +807,6 @@
                                      Handle<String> name,
                                      Register scratch0);
 
-  // TODO(kmillikin): Eliminate this function when the stub cache is fully
-  // handlified.
-  MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register receiver,
-      Register properties,
-      String* name,
-      Register scratch0);
-
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
                                      Label* done,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 508d830..6b111ff 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -180,6 +180,10 @@
 
   // Hole found, store the-hole NaN.
   __ bind(&convert_hole);
+  if (FLAG_debug_code) {
+    __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+    __ Assert(eq, "object found in smi-only array");
+  }
   __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
 
   __ bind(&entry);
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 497a295..f19bde7 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -47,11 +47,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
@@ -399,7 +394,7 @@
   ASSERT(var->IsStackAllocated() || var->IsContextSlot());
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -422,7 +417,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -457,7 +452,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -516,7 +511,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -583,7 +578,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -686,7 +681,7 @@
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -697,13 +692,7 @@
 
   Label skip;
   if (should_normalize) __ b(&skip);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
     __ cmp(r0, ip);
@@ -721,8 +710,8 @@
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
-  bool binding_needs_init =
-      mode == CONST || mode == CONST_HARMONY || mode == LET;
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
       ++(*global_count);
@@ -1294,28 +1283,60 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (!var->binding_needs_init()) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        GetVar(r0, var);
-        __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-        if (var->mode() == LET || var->mode() == CONST_HARMONY) {
-          // Throw a reference error when using an uninitialized let/const
-          // binding in harmony mode.
-          Label done;
-          __ b(ne, &done);
-          __ mov(r0, Operand(var->name()));
-          __ push(r0);
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-          __ bind(&done);
-        } else {
-          // Uninitalized const bindings outside of harmony mode are unholed.
-          ASSERT(var->mode() == CONST);
-          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP.
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        // Check that we always have valid source position.
+        ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+        ASSERT(proxy->position() != RelocInfo::kNoPosition);
+        bool skip_init_check =
+            var->mode() != CONST &&
+            var->scope()->DeclarationScope() == scope()->DeclarationScope() &&
+            var->initializer_position() < proxy->position();
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          GetVar(r0, var);
+          __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            Label done;
+            __ b(ne, &done);
+            __ mov(r0, Operand(var->name()));
+            __ push(r0);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+            __ bind(&done);
+          } else {
+            // Uninitalized const bindings outside of harmony mode are unholed.
+            ASSERT(var->mode() == CONST);
+            __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+          }
+          context()->Plug(r0);
+          break;
         }
-        context()->Plug(r0);
       }
+      context()->Plug(var);
       break;
     }
 
@@ -1734,7 +1755,7 @@
   __ mov(r2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name r0 and r2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1742,7 +1763,7 @@
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has arguments key and receiver in r0 and r1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -2200,8 +2221,7 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
@@ -2221,9 +2241,7 @@
   __ mov(r1, Operand(Smi::FromInt(strict_mode)));
   __ push(r1);
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 }
 
 
@@ -2257,28 +2275,11 @@
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly
-      // in generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(r0);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
       __ push(r1);
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in r0 (function) and
       // r1 (receiver). Touch up the stack with the right values.
@@ -2402,7 +2403,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2414,7 +2416,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2422,7 +2424,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2434,7 +2437,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2442,7 +2445,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2467,14 +2471,15 @@
   __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ b(lt, if_false);
   __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(le, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2488,14 +2493,15 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ge, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2511,7 +2517,7 @@
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ne, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2519,8 +2525,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2599,12 +2605,13 @@
   __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2618,14 +2625,15 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2639,14 +2647,15 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2660,7 +2669,7 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2668,8 +2677,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2692,14 +2701,15 @@
   __ bind(&check_frame_marker);
   __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
   __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2715,14 +2725,15 @@
 
   __ pop(r1);
   __ cmp(r0, r1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
@@ -2736,9 +2747,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label exit;
   // Get the number of formal parameters.
   __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2758,7 +2768,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2818,7 +2829,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2826,6 +2837,7 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2839,9 +2851,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
 
@@ -2893,9 +2904,10 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2905,9 +2917,10 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2918,9 +2931,9 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
-
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
   Label done;
@@ -2936,8 +2949,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2947,9 +2961,9 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(r1);  // r0 = value. r1 = object.
@@ -2975,9 +2989,9 @@
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
-
   // Load the argument on the stack and call the stub.
   VisitForStackValue(args->at(0));
 
@@ -2987,9 +3001,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
-
   VisitForAccumulatorValue(args->at(0));
 
   Label done;
@@ -3005,9 +3019,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
@@ -3052,9 +3066,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
@@ -3101,9 +3115,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3113,9 +3127,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3125,10 +3139,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3136,10 +3151,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3147,10 +3163,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3158,8 +3175,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3167,7 +3185,8 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3186,8 +3205,9 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3197,7 +3217,8 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3294,9 +3315,9 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
 
@@ -3345,7 +3366,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = r0;
@@ -3385,7 +3407,8 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
@@ -3397,14 +3420,15 @@
 
   __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
   __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3419,12 +3443,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
-
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
@@ -3752,18 +3776,35 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+        if (context()->IsStackValue()) __ push(r0);
+        __ jmp(&done);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+        if (context()->IsStackValue()) __ push(r0);
+        __ bind(&done);
       }
       break;
     }
@@ -4022,12 +4063,13 @@
     context()->Plug(r0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Expression* sub_expr,
                                                  Handle<String> check) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -4037,9 +4079,9 @@
                          &if_true, &if_false, &fall_through);
 
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(r0, if_true);
@@ -4126,7 +4168,7 @@
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(r0, ip);
       Split(eq, if_true, if_false, fall_through);
@@ -4136,7 +4178,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       // The stub returns 0 for true.
       __ tst(r0, r0);
       Split(eq, if_true, if_false, fall_through);
@@ -4186,7 +4228,7 @@
       Handle<Code> ic = CompareIC::GetUninitialized(op);
       __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ cmp(r0, Operand(0));
       Split(cond, if_true, if_false, fall_through);
     }
@@ -4209,7 +4251,7 @@
                          &if_true, &if_false, &fall_through);
 
   VisitForAccumulatorValue(sub_expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Heap::RootListIndex nil_value = nil == kNullValue ?
       Heap::kNullValueRootIndex :
       Heap::kUndefinedValueRootIndex;
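
A JavaScript sketch of the observable behavior behind the skip_init_check logic above (an illustration, not part of the patch; let requires --harmony):

  // Hole check still required: f runs before x is initialized,
  // so the access inside f hits an uninitialized harmony binding
  // and throws a ReferenceError via Runtime::kThrowReferenceError.
  function g() { f(); let x = 1; function f() { x = 2; } }

  // Hole check can be skipped: the use of x physically follows
  // its initializer in the same declaration scope.
  function h() { let x = 1; return x; }  // returns 1

  // Non-harmony const can never skip the check: x is declared but
  // never initialized, and reads of it are "unholed" to undefined.
  if (false) { const x; }; var y = x;    // y === undefined
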
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 5197842..ffb7457 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -750,7 +750,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasSideEffects()) {
+  if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -762,7 +762,8 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -820,28 +821,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
-                                   HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineAsRegister(new LBitI(op, left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), r1);
-    LOperand* right = UseFixed(instr->right(), r0);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
-    return MarkAsCall(DefineFixed(result, r0), instr);
-  }
-}
-
-
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsTagged()) {
@@ -1243,8 +1222,24 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineAsRegister(new LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), r1);
+    LOperand* right = UseFixed(instr->right(), r0);
+    LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
 }
 
 
@@ -1255,16 +1250,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 5733bd0..1452e53 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -796,18 +796,15 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
+  LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return hydrogen()->op(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
-  Token::Value op_;
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
 };
 
 
@@ -2192,7 +2189,6 @@
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index cf4258c..90bad75 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1094,7 +1094,7 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     CallKind call_kind) {
@@ -1102,7 +1102,7 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  mov(r1, Operand(Handle<JSFunction>(function)));
+  mov(r1, Operand(function));
   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   ParameterCount expected(function->shared()->formal_parameter_count());
@@ -2084,41 +2084,19 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Handle<Code> code(Code::cast(result));
-  Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
   ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
-  return result;
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+                                              int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -2181,14 +2159,10 @@
   mov(pc, lr);
 
   bind(&promote_scheduled_exception);
-  MaybeObject* result
-      = TryTailCallExternalReference(
-          ExternalReference(Runtime::kPromoteScheduledException, isolate()),
-          0,
-          1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+      0,
+      1);
 
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
@@ -2200,8 +2174,6 @@
       ExternalReference::delete_handle_scope_extensions(isolate()), 1);
   mov(r0, r4);
   jmp(&leave_exit_frame);
-
-  return result;
 }
 
 
@@ -2633,17 +2605,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  mov(r0, Operand(num_arguments));
-  return TryJumpToExternalReference(ext);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -2664,18 +2625,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& builtin) {
-#if defined(__thumb__)
-  // Thumb mode builtin.
-  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
-  mov(r1, Operand(builtin));
-  CEntryStub stub(1);
-  return TryTailCallStub(&stub);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 90c4b37..8f6803e 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -520,7 +520,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       CallKind call_kind);
@@ -926,20 +926,9 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
-
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
-                                               Condition cond = al);
-
   // Call a runtime routine.
   void CallRuntime(const Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -958,12 +947,6 @@
                                  int num_arguments,
                                  int result_size);
 
-  // Tail call of a runtime routine (jump). Try to generate the code if
-  // necessary. Do not perform a GC but instead return a retry after GC
-  // failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
@@ -1012,18 +995,15 @@
 
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
-  // Calls an API function. Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions. Restores context.
-  // stack_space - space to be unwound on exit (includes the call js
-  // arguments space and the additional space allocated for the fast call).
-  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
-                                           int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Restores context.  stack_space
+  // - space to be unwound on exit (includes the call js arguments space and
+  // the additional space allocated for the fast call).
+  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
 
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index f9a10c4..bf6f085 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -149,68 +149,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateDictionaryNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss_label,
-    Register receiver,
-    String* name,
-    Register scratch0,
-    Register scratch1) {
-  ASSERT(name->IsSymbol());
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  Label done;
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  Register map = scratch1;
-  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
-  __ b(ne, miss_label);
-
-  // Check that receiver is a JSObject.
-  __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-  __ b(lt, miss_label);
-
-  // Load properties array.
-  Register properties = scratch0;
-  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  // Check that the properties array is a dictionary.
-  __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
-  Register tmp = properties;
-  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
-  __ cmp(map, tmp);
-  __ b(ne, miss_label);
-
-  // Restore the temporarily used register.
-  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-
-  MaybeObject* result = StringDictionaryLookupStub::TryGenerateNegativeLookup(
-      masm,
-      miss_label,
-      &done,
-      receiver,
-      properties,
-      name,
-      scratch1);
-  if (result->IsFailure()) return result;
-
-  __ bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  return result;
-}
-
-
 void StubCache::GenerateProbe(MacroAssembler* masm,
                               Code::Flags flags,
                               Register receiver,
@@ -294,7 +232,10 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -302,8 +243,8 @@
   __ cmp(prototype, ip);
   __ b(ne, miss);
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(isolate->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ Move(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -566,12 +507,12 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
   Register scratch = name;
-  __ mov(scratch, Operand(Handle<Object>(interceptor)));
+  __ mov(scratch, Operand(interceptor));
   __ push(scratch);
   __ push(receiver);
   __ push(holder);
@@ -580,11 +521,12 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -597,6 +539,7 @@
   __ CallStub(&stub);
 }
 
+
 static const int kFastApiCallArguments = 3;
 
 // Reserves space for the extra arguments to FastHandleApiCall in the
@@ -618,10 +561,9 @@
 }
 
 
-static MaybeObject* GenerateFastApiDirectCall(
-    MacroAssembler* masm,
-    const CallOptimization& optimization,
-    int argc) {
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
+                                      const CallOptimization& optimization,
+                                      int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
   //  -- sp[4]              : callee js function
@@ -632,18 +574,18 @@
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ mov(r5, Operand(Handle<JSFunction>(function)));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ mov(r5, Operand(function));
   __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ Move(r0, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ Move(r0, api_call_info);
     __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
   } else {
-    __ Move(r6, Handle<Object>(call_data));
+    __ Move(r6, call_data);
   }
   // Store js function and call data.
   __ stm(ib, sp, r5.bit() | r6.bit());
@@ -652,10 +594,6 @@
   // (refer to layout above).
   __ add(r2, sp, Operand(2 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
   const int kApiStackSpace = 4;
 
   FrameScope frame_scope(masm, StackFrame::MANUAL);
@@ -676,16 +614,15 @@
   __ mov(ip, Operand(0));
   __ str(ip, MemOperand(r0, 3 * kPointerSize));
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated). Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
   const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
   ExternalReference ref = ExternalReference(&fun,
                                             ExternalReference::DIRECT_API_CALL,
                                             masm->isolate());
   AllowExternalCallThatCantCauseGC scope(masm);
-  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
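
The comment deleted above ("Emitting a stub call may try to allocate... Do not allow the assembler to perform a garbage collection") names the constraint being retired: with raw pointers live, code emission could not GC, so TryCallApiFunctionAndReturn reported allocation failures as a MaybeObject* that every caller had to check. The handlified CallApiFunctionAndReturn may allocate and returns nothing. The old convention in miniature, with stand-in types rather than V8's real ones:

#include <cstdio>

// Stand-in for MaybeObject: either emitted code or a retry-after-GC
// failure that must be propagated by hand.
struct MaybeResult {
  bool failed;
};

MaybeResult TryEmitApiCall(bool allocation_failed) {
  MaybeResult result = { allocation_failed };
  return result;
}

int main() {
  MaybeResult result = TryEmitApiCall(false);
  if (result.failed) return 1;  // the IsFailure() check at every call site
  std::printf("code emitted\n");
  return 0;
}

Dropping the failure value is what lets the if (result->IsFailure()) return result; checks disappear from the callers later in this patch.
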
 
 
@@ -700,86 +637,63 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
     __ JumpIfSmi(receiver, miss);
-
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
-
     Counters* counters = masm->isolate()->counters();
-
     int depth1 = kInvalidProtoDepth;
     int depth2 = kInvalidProtoDepth;
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
-       !lookup->holder()->IsGlobalObject()) {
-     depth1 =
-         optimization.GetPrototypeDepthOfExpectedType(object,
-                                                      interceptor_holder);
-     if (depth1 == kInvalidProtoDepth) {
-       depth2 =
-           optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                        lookup->holder());
-     }
-     can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                            (depth2 != kInvalidProtoDepth);
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
+      }
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     __ IncrementCounter(counters->call_const_interceptor(), 1,
-                      scratch1, scratch2);
+                        scratch1, scratch2);
 
     if (can_do_fast_api_call) {
       __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -792,9 +706,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver,
-                                        interceptor_holder, scratch1,
-                                        scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -807,10 +721,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed, so we can use the cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -821,10 +736,7 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result = GenerateFastApiDirectCall(masm,
-                                                      optimization,
-                                                      arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
@@ -845,66 +757,53 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
-
-    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
     // Call a runtime function to load the interceptor property.
     FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
-
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
-
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                           masm->isolate()),
         5);
-
     // Restore the name_ register.
     __ pop(name_);
-
     // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ Push(holder, name_);
-
       CompileCallLoadPropertyWithInterceptor(masm,
                                              receiver,
                                              holder,
                                              name_,
                                              holder_obj);
-
       __ pop(name_);  // Restore the name.
       __ pop(receiver);  // Restore the holder.
     }
-
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
     __ cmp(r0, scratch);
@@ -938,30 +837,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
-  ASSERT(cell->value()->IsTheHole());
-  __ mov(scratch, Operand(Handle<Object>(cell)));
-  __ ldr(scratch,
-         FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(scratch, ip);
-  __ b(ne, miss);
-  return cell;
-}
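
For context on the removed helper: a stub compiled for a property that is missing on a global object embeds that property's cell, and the generated code must re-check the cell at run time, as the ASSERT(cell->value()->IsTheHole()) and the hole comparison above show. The invariant, sketched with hypothetical types:

struct PropertyCell {
  bool holds_the_hole;  // true while the property remains undefined
};

// The emitted code performs this test at run time: if anyone has
// defined the property since the stub was compiled, the cell holds a
// real value and the stub must jump to the miss label.
bool NegativeStubStillValid(const PropertyCell& cell) {
  return cell.holds_the_hole;
}
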
-
-
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
 static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -984,34 +859,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
-    if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = TryGenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
-    }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
-  }
-  return NULL;
-}
-
-
 // Convert and store the int passed in register ival as an IEEE 754 single
 // precision floating point value at memory location (dst + 4 * wordoffset).
 // If VFP3 is available, use it for the conversion.
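
For reference, when VFP3 is unavailable the helper described above must produce the IEEE 754 bit pattern of the integer by hand. A host-side sketch of the target value (the stub computes the same bits with integer instructions; the function name here is illustrative):

#include <stdint.h>
#include <string.h>

uint32_t SinglePrecisionBits(int32_t ival) {
  float f = static_cast<float>(ival);  // the conversion VFP3 does in one vcvt
  uint32_t bits;
  memcpy(&bits, &f, sizeof(bits));     // sign(1) | exponent(8) | mantissa(23)
  return bits;
}
// Example: SinglePrecisionBits(1) == 0x3f800000.
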
@@ -1226,150 +1073,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-Register StubCompiler::CheckPrototypes(JSObject* object,
-                                       Register object_reg,
-                                       JSObject* holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       String* name,
-                                       int save_at_depth,
-                                       Label* miss) {
-  // Make sure there's no overlap between holder and object registers.
-  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
-  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
-         && !scratch2.is(scratch1));
-
-  // Keep track of the current object in register reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  if (save_at_depth == depth) {
-    __ str(reg, MemOperand(sp));
-  }
-
-  // Check the maps in the prototype chain.
-  // Traverse the prototype chain from the object and do map checks.
-  JSObject* current = object;
-  while (current != holder) {
-    depth++;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
-    ASSERT(current->GetPrototype()->IsJSObject());
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
-    if (!current->HasFastProperties() &&
-        !current->IsJSGlobalObject() &&
-        !current->IsJSGlobalProxy()) {
-      if (!name->IsSymbol()) {
-        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
-        Object* lookup_result = NULL;  // Initialization to please compiler.
-        if (!maybe_lookup_result->ToObject(&lookup_result)) {
-          set_failure(Failure::cast(maybe_lookup_result));
-          return reg;
-        }
-        name = String::cast(lookup_result);
-      }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
-             StringDictionary::kNotFound);
-
-      MaybeObject* negative_lookup =
-          TryGenerateDictionaryNegativeLookup(masm(),
-                                              miss,
-                                              reg,
-                                              name,
-                                              scratch1,
-                                              scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
-
-      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-
-      // Branch on the result of the map check.
-      __ b(ne, miss);
-
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-        // Restore scratch register to be the map of the object.  In the
-        // new space case below, we load the prototype from the map in
-        // the scratch register.
-        __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-
-      reg = holder_reg;  // from now the object is in holder_reg
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
-      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      // Check the map of the current object.
-      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-      // Branch on the result of the map check.
-      __ b(ne, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-      }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, Operand(Handle<JSObject>(prototype)));
-    }
-
-    if (save_at_depth == depth) {
-      __ str(reg, MemOperand(sp));
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-  }
-
-  // Check the holder map.
-  __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-  __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-  __ b(ne, miss);
-
-  // Log the check depth.
-  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  // Perform security check for access to the global object.
-  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-  if (holder->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
-
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-  MaybeObject* result = TryGenerateCheckPropertyCells(masm(),
-                                                      object,
-                                                      holder,
-                                                      name,
-                                                      scratch1,
-                                                      miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
-
-  // Return the register containing the holder.
-  return reg;
-}
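
This removed raw-pointer CheckPrototypes emits the core guard of the stub cache: walk the prototype chain from the receiver to the holder and compare each object's map against the map recorded at compile time, jumping to the miss label on any difference. Its handlified replacement (not shown in this hunk) keeps the same protocol. The guarantee, as a host-side sketch with hypothetical types, assuming the holder is on the chain:

#include <cstddef>
#include <vector>

struct Object {
  const void* map;  // identity of the hidden class seen at compile time
  Object* prototype;
};

// True iff every map from `object` up to and including `holder` still
// matches what the compiled stub embedded; any mismatch is a miss.
bool PrototypeChainUnchanged(Object* object, Object* holder,
                             const std::vector<const void*>& expected) {
  std::size_t i = 0;
  for (Object* current = object; current != NULL;
       current = current->prototype) {
    if (i >= expected.size() || current->map != expected[i++]) return false;
    if (current == holder) return true;  // holder's map checked last
  }
  return false;  // holder not found on the chain
}
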
-
-
 void StubCompiler::GenerateLoadField(Handle<JSObject> object,
                                      Handle<JSObject> holder,
                                      Register receiver,
@@ -1412,43 +1115,37 @@
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                      name, miss);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Build AccessorInfo::args_ list on the stack and push property name below
   // the exit frame to make GC aware of them and store pointers to them.
   __ push(receiver);
   __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
-  Handle<AccessorInfo> callback_handle(callback);
-  if (heap()->InNewSpace(callback_handle->data())) {
-    __ Move(scratch3, callback_handle);
+  if (heap()->InNewSpace(callback->data())) {
+    __ Move(scratch3, callback);
     __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
   } else {
-    __ Move(scratch3, Handle<Object>(callback_handle->data()));
+    __ Move(scratch3, Handle<Object>(callback->data()));
   }
   __ Push(reg, scratch3, name_reg);
   __ mov(r0, sp);  // r0 = Handle<String>
 
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
   const int kApiStackSpace = 1;
-
   FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
@@ -1457,28 +1154,26 @@
   __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
   __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
   const int kStackUnwindSpace = 4;
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_GETTER_CALL,
                         masm()->isolate());
-  return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1494,9 +1189,9 @@
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+               lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1513,14 +1208,12 @@
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
       if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
         // CALLBACKS case needs a receiver to be passed into C++ callback.
         __ Push(receiver, holder_reg, name_reg);
       } else {
         __ Push(holder_reg, name_reg);
       }
-
       // Invoke an interceptor.  Note: map checks from receiver to
       // interceptor's holder have been compiled before (see a caller
       // of this method).
@@ -1529,7 +1222,6 @@
                                              holder_reg,
                                              name_reg,
                                              interceptor_holder);
-
       // Check if interceptor provided a value for property.  If it's
       // the case, return immediately.
       Label interceptor_failed;
@@ -1545,16 +1237,14 @@
       if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
         __ pop(receiver);
       }
-
       // Leave the internal frame.
     }
-
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1573,15 +1263,14 @@
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
       // Important invariant in CALLBACKS case: the code above must be
       // structured to never clobber |receiver| register.
-      __ Move(scratch2, Handle<AccessorInfo>(callback));
+      __ Move(scratch2, callback);
       // holder_reg is either receiver or scratch1.
       if (!receiver.is(holder_reg)) {
         ASSERT(scratch1.is(holder_reg));
@@ -1626,9 +1315,9 @@
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1641,7 +1330,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(r0, miss);
   }
 
@@ -1650,15 +1339,16 @@
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
-  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(r3, Operand(cell));
   __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(function)) {
+  if (heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1672,11 +1362,10 @@
     __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
     __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
     __ cmp(r4, r3);
-    __ b(ne, miss);
   } else {
-    __ cmp(r1, Operand(Handle<JSFunction>(function)));
-    __ b(ne, miss);
+    __ cmp(r1, Operand(function));
   }
+  __ b(ne, miss);
 }
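
The branch above encodes a rule that recurs throughout this patch (see also the call-data handling in GenerateFastApiDirectCall): a pointer may be baked into the stub as an immediate only if a scavenge cannot move the object, which is why a new-space function is compared via its stable SharedFunctionInfo instead. The rewrite also hoists the common __ b(ne, miss) out of both arms. A sketch of the decision; the address-range predicate is an assumed stand-in, not V8's implementation:

#include <stdint.h>

enum EmbedKind { kEmbedImmediate, kLoadAtRuntime };

// Assumed shape of the predicate; V8 asks heap()->InNewSpace(*function).
bool InNewSpace(uintptr_t obj, uintptr_t lo, uintptr_t hi) {
  return obj >= lo && obj < hi;
}

EmbedKind ChooseEmbedding(uintptr_t obj, uintptr_t lo, uintptr_t hi) {
  // A new-space object may move on every scavenge, so an immediate
  // baked into machine code would go stale.
  return InNewSpace(obj, lo, hi) ? kLoadAtRuntime : kEmbedImmediate;
}
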
 
 
@@ -1689,20 +1378,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGenerateMissBranch() {
-  MaybeObject* maybe_obj =
-      isolate()->stub_cache()->TryComputeCallMiss(arguments().immediate(),
-                                                  kind_,
-                                                  extra_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
-}
-
-
 Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
                                                 Handle<JSObject> holder,
                                                 int index,
@@ -1737,11 +1412,12 @@
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1751,14 +1427,12 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   Register receiver = r1;
-
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
@@ -1767,8 +1441,8 @@
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object), receiver,
-                  holder, r3, r0, r4, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
+                  name, &miss);
 
   if (argc == 0) {
     // Nothing to do, just return the length.
@@ -1777,10 +1451,8 @@
     __ Ret();
   } else {
     Label call_builtin;
-
     Register elements = r3;
     Register end_elements = r5;
-
     // Get the elements array of the object.
     __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
 
@@ -1921,19 +1593,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
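
This function also shows the new bail-out protocol: where the old code returned heap()->undefined_value() to mean "no specialized stub here, use the generic path", the handlified code returns Handle<Code>::null(). In miniature, with shared_ptr standing in for Handle:

#include <memory>
#include <string>

typedef std::string Code;

// Returns an empty handle when the custom compiler declines, mirroring
// Handle<Code>::null(); the caller then falls through to the generic
// compiler instead of testing for a heap sentinel.
std::shared_ptr<Code> TryCompileArrayPush(bool receiver_is_array) {
  if (!receiver_is_array) return std::shared_ptr<Code>();
  return std::make_shared<Code>("specialized array-push stub");
}

std::shared_ptr<Code> CompileCall(bool receiver_is_array) {
  std::shared_ptr<Code> code = TryCompileArrayPush(receiver_is_array);
  if (code) return code;                          // !code.is_null()
  return std::make_shared<Code>("generic stub");  // regular path
}
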
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1943,25 +1615,22 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss, return_undefined, call_builtin;
-
   Register receiver = r1;
   Register elements = r3;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object),
-                  receiver, holder, elements, r4, r0, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+                  r4, r0, name, &miss);
 
   // Get the elements array of the object.
   __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -2010,20 +1679,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2033,10 +1701,9 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
@@ -2047,17 +1714,16 @@
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
-  GenerateNameCheck(Handle<String>(name), &name_miss);
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             r0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
-                  r1, r3, r4, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  r0, holder, r1, r3, r4, name, &miss);
 
   Register receiver = r1;
   Register index = r4;
@@ -2070,20 +1736,20 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      scratch,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -2094,22 +1760,21 @@
 
   __ bind(&miss);
   // Restore function name in r2.
-  __ Move(r2, Handle<String>(name));
+  __ Move(r2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2119,31 +1784,28 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
       (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
-  GenerateNameCheck(Handle<String>(name), &name_miss);
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             r0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
-                  r1, r3, r4, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  r0, holder, r1, r3, r4, name, &miss);
 
   Register receiver = r0;
   Register index = r4;
@@ -2157,21 +1819,21 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -2182,22 +1844,21 @@
 
   __ bind(&miss);
   // Restore function name in r2.
-  __ Move(r2, Handle<String>(name));
+  __ Move(r2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2210,22 +1871,23 @@
 
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
 
-    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2241,13 +1903,13 @@
   // Convert the smi code to uint16.
   __ and_(code, code, Operand(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, r0);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, r0);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
@@ -2256,19 +1918,19 @@
 
   __ bind(&miss);
   // r2: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
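
The and_ with Operand(Smi::FromInt(0xffff)) above is the ToUint16 step of String.fromCharCode: only the low 16 bits of the argument form the code unit. A worked example:

#include <stdint.h>

uint16_t ToUint16(int32_t code) {
  return static_cast<uint16_t>(code & 0xffff);  // the masking the stub emits
}
// ToUint16(0x10041) == 0x0041, so String.fromCharCode(0x10041) yields "A".
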
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2278,31 +1940,28 @@
   // -----------------------------------
 
   if (!CpuFeatures::IsSupported(VFP3)) {
-      return heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   CpuFeatures::Scope scope_vfp3(VFP3);
-
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss, slow;
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2404,19 +2063,19 @@
 
   __ bind(&miss);
   // r2: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
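
CompileMathFloorCall is also feature-gated: without VFP3 there is no fast float path worth emitting, so the compiler bails out before doing any work, and CpuFeatures::Scope marks the region of code generation that may use VFP3 instructions. The gating pattern, sketched with assumed names:

#include <memory>

enum CpuFeatureKind { kVFP3 };

// Stand-in probe; V8 consults CpuFeatures::IsSupported(VFP3).
bool FeatureIsSupported(CpuFeatureKind f) { return f == kVFP3; }

std::shared_ptr<int> CompileFloorStub() {
  // No VFP3, no specialized stub: bail out with a null handle and let
  // the generic call path handle Math.floor.
  if (!FeatureIsSupported(kVFP3)) return std::shared_ptr<int>();
  return std::make_shared<int>(0);  // stands in for the compiled Code
}
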
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2426,25 +2085,22 @@
   // -----------------------------------
 
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
-  GenerateNameCheck(Handle<String>(name), &miss);
-
-  if (cell == NULL) {
+  GenerateNameCheck(name, &miss);
+  if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2505,36 +2161,34 @@
 
   __ bind(&miss);
   // r2: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   Counters* counters = isolate()->counters();
 
   ASSERT(optimization.is_simple_api_call());
   // Bail out if the object is a global object, as we don't want to
   // repatch it to the global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
-
-  GenerateNameCheck(Handle<String>(name), &miss_before_stack_reserved);
+  GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -2549,45 +2203,41 @@
   ReserveSpaceForFastApiCall(masm(), r0);
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
                   depth, &miss);
 
-  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiDirectCall(masm(), optimization, argc);
 
   __ bind(&miss);
   FreeSpaceForFastApiCall(masm());
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack
   const int argc = arguments().immediate();
@@ -2601,16 +2251,14 @@
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(masm()->isolate()->counters()->call_const(),
                           1, r0, r3);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                      &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                      name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2621,28 +2269,25 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         // Check that the object is a two-byte string or a symbol.
         __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
         __ b(ge, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
-                        r1, r4, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            r0, holder, r3, r1, r4, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(r1, &fast);
@@ -2652,18 +2297,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
-                        r1, r4, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            r0, holder, r3, r1, r4, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ LoadRoot(ip, Heap::kTrueValueRootIndex);
@@ -2676,14 +2321,15 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
-                        r1, r4, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            r0, holder, r3, r1, r4, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
-    }
-
-    default:
-      UNREACHABLE();
   }
 
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
@@ -2693,29 +2339,25 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
@@ -2723,66 +2365,47 @@
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
   CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         r1,
-                                         r3,
-                                         r4,
-                                         r0,
-                                         &miss);
-  if (result->IsFailure()) {
-      return result;
-  }
+  compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
+                   &miss);
 
   // Move returned value, the function to call, to r1.
   __ mov(r1, r0);
   // Restore receiver.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), Handle<Object>(object), arguments(), &miss,
-                       extra_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy if
@@ -2798,7 +2421,6 @@
   // Jump to the cached code (tail call).
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
@@ -2813,11 +2435,10 @@
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(NORMAL, name);
+  return GetCode(NORMAL, name);
 }
 
 
@@ -3050,29 +2671,24 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
   Label miss;
-
-  MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
-                                             callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(CALLBACKS, name);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -3096,9 +2712,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -3108,21 +2724,13 @@
 
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(object,
-                          holder,
-                          &lookup,
-                          r0,
-                          r2,
-                          r3,
-                          r1,
-                          r4,
-                          name,
+  GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -3197,11 +2805,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3210,20 +2818,15 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
-                                             r4, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return TryGetCode(CALLBACKS, name);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -3252,9 +2855,10 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3263,25 +2867,17 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          r1,
-                          r0,
-                          r2,
-                          r3,
-                          r4,
-                          name,
+  GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -3501,7 +3097,8 @@
 }
 
 
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
   //  -- r1    : constructor
@@ -3547,12 +3144,7 @@
   // r2: initial map
   // r7: undefined
   __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-  __ AllocateInNewSpace(r3,
-                        r4,
-                        r5,
-                        r6,
-                        &generic_stub_call,
-                        SIZE_IN_WORDS);
+  __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
 
   // The JSObject is now allocated; initialize the fields. Map is set to the
   // initial map, and properties and elements are set to the empty fixed array.
@@ -3584,7 +3176,7 @@
   // r7: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       Label not_passed, next;
@@ -4043,9 +3635,9 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
@@ -4419,9 +4011,9 @@
   __ Ret();
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
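The stub-cache hunks above are part of the handlification of the stub compilers: instead of returning MaybeObject* and checking IsFailure() after every step, the compilers now return Handle<Code> and use a null handle as the bail-out signal. A standalone C++ sketch of that convention (the Handle class and names below are simplified stand-ins, not V8's actual types):

#include <cassert>
#include <cstdio>

template <typename T>
class Handle {  // simplified stand-in for v8::internal::Handle<T>
 public:
  Handle() : location_(nullptr) {}
  explicit Handle(T* object) : location_(object) {}
  static Handle<T> null() { return Handle<T>(); }
  bool is_null() const { return location_ == nullptr; }
  T* operator->() const { assert(!is_null()); return location_; }
 private:
  T* location_;
};

struct Code { const char* kind; };

// A custom-call compiler that may decline: a null handle tells the caller
// to fall through to the regular compilation path, mirroring the
// "A null handle means bail out" comments in the patch.
Handle<Code> CompileCustomCall(bool applicable) {
  static Code custom = {"custom"};
  return applicable ? Handle<Code>(&custom) : Handle<Code>::null();
}

Handle<Code> CompileCallConstant(bool custom_applicable) {
  Handle<Code> code = CompileCustomCall(custom_applicable);
  if (!code.is_null()) return code;   // fast path succeeded
  static Code generic = {"generic"};  // regular compilation below
  return Handle<Code>(&generic);
}

int main() {
  std::printf("%s\n", CompileCallConstant(true)->kind);   // custom
  std::printf("%s\n", CompileCallConstant(false)->kind);  // generic
}
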
diff --git a/src/ast.cc b/src/ast.cc
index 9e34bc0..d5282d2 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -48,16 +48,19 @@
 // ----------------------------------------------------------------------------
 // Implementation of other node functionality.
 
-Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
-  return (expression()->AsAssignment() != NULL &&
-          !expression()->AsAssignment()->is_compound())
-      ? expression()->AsAssignment()
-      : NULL;
+
+bool Expression::IsSmiLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
 }
 
 
-CountOperation* ExpressionStatement::StatementAsCountOperation() {
-  return expression()->AsCountOperation();
+bool Expression::IsStringLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
+}
+
+
+bool Expression::IsNullLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
 }
 
 
diff --git a/src/ast.h b/src/ast.h
index 3de00ef..295257a 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -118,7 +118,6 @@
 #define DECLARE_NODE_TYPE(type)                                         \
   virtual void Accept(AstVisitor* v);                                   \
   virtual AstNode::Type node_type() const { return AstNode::k##type; }  \
-  virtual type* As##type() { return this; }
 
 
 class AstNode: public ZoneObject {
@@ -153,7 +152,8 @@
 
   // Type testing & conversion functions overridden by concrete subclasses.
 #define DECLARE_NODE_FUNCTIONS(type)                  \
-  virtual type* As##type() { return NULL; }
+  bool Is##type() { return node_type() == AstNode::k##type; }          \
+  type* As##type() { return Is##type() ? reinterpret_cast<type*>(this) : NULL; }
   AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
 #undef DECLARE_NODE_FUNCTIONS
 
@@ -196,9 +196,6 @@
 
   virtual Statement* AsStatement()  { return this; }
 
-  virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
-  virtual CountOperation* StatementAsCountOperation() { return NULL; }
-
   bool IsEmpty() { return AsEmptyStatement() != NULL; }
 
   void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -264,7 +261,6 @@
 
   virtual Expression* AsExpression()  { return this; }
 
-  virtual bool IsTrivial() { return false; }
   virtual bool IsValidLeftHandSide() { return false; }
 
   // Helpers for ToBoolean conversion.
@@ -276,33 +272,24 @@
   // names because [] for string objects is handled only by keyed ICs.
   virtual bool IsPropertyName() { return false; }
 
-  // Mark the expression as being compiled as an expression
-  // statement. This is used to transform postfix increments to
-  // (faster) prefix increments.
-  virtual void MarkAsStatement() { /* do nothing */ }
-
   // True iff the result can be safely overwritten (to avoid allocation).
   // False for operations that can return one of their operands.
   virtual bool ResultOverwriteAllowed() { return false; }
 
   // True iff the expression is a literal represented as a smi.
-  virtual bool IsSmiLiteral() { return false; }
+  bool IsSmiLiteral();
 
   // True iff the expression is a string literal.
-  virtual bool IsStringLiteral() { return false; }
+  bool IsStringLiteral();
 
   // True iff the expression is the null literal.
-  virtual bool IsNullLiteral() { return false; }
+  bool IsNullLiteral();
 
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
     UNREACHABLE();
     return false;
   }
-  virtual bool IsArrayLength() {
-    UNREACHABLE();
-    return false;
-  }
   virtual SmallMapList* GetReceiverTypes() {
     UNREACHABLE();
     return NULL;
@@ -368,16 +355,6 @@
 
   DECLARE_NODE_TYPE(Block)
 
-  virtual Assignment* StatementAsSimpleAssignment() {
-    if (statements_.length() != 1) return NULL;
-    return statements_[0]->StatementAsSimpleAssignment();
-  }
-
-  virtual CountOperation* StatementAsCountOperation() {
-    if (statements_.length() != 1) return NULL;
-    return statements_[0]->StatementAsCountOperation();
-  }
-
   virtual bool IsInlineable() const;
 
   void AddStatement(Statement* statement) { statements_.Add(statement); }
@@ -612,9 +589,6 @@
 
   virtual bool IsInlineable() const;
 
-  virtual Assignment* StatementAsSimpleAssignment();
-  virtual CountOperation* StatementAsCountOperation();
-
   void set_expression(Expression* e) { expression_ = e; }
   Expression* expression() const { return expression_; }
 
@@ -895,11 +869,6 @@
 
   DECLARE_NODE_TYPE(Literal)
 
-  virtual bool IsTrivial() { return true; }
-  virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
-  virtual bool IsStringLiteral() { return handle_->IsString(); }
-  virtual bool IsNullLiteral() { return handle_->IsNull(); }
-
   // Check if this literal is identical to the other literal.
   bool IsIdenticalTo(const Literal* other) const {
     return handle_.is_identical_to(other->handle_);
@@ -1114,12 +1083,6 @@
     return var_ == NULL ? true : var_->IsValidLeftHandSide();
   }
 
-  virtual bool IsTrivial() {
-    // Reading from a mutable variable is a side effect, but the
-    // variable for 'this' is immutable.
-    return is_this_ || is_trivial_;
-  }
-
   virtual bool IsInlineable() const;
 
   bool IsVariable(Handle<String> n) {
@@ -1187,7 +1150,7 @@
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
-  virtual bool IsArrayLength() { return is_array_length_; }
+  bool IsArrayLength() { return is_array_length_; }
 
  private:
   Expression* obj_;
@@ -1325,8 +1288,17 @@
                  Token::Value op,
                  Expression* expression,
                  int pos)
-      : Expression(isolate), op_(op), expression_(expression), pos_(pos) {
+      : Expression(isolate),
+        op_(op),
+        expression_(expression),
+        pos_(pos),
+        materialize_true_id_(AstNode::kNoNumber),
+        materialize_false_id_(AstNode::kNoNumber) {
     ASSERT(Token::IsUnaryOp(op));
+    if (op == Token::NOT) {
+      materialize_true_id_ = GetNextId(isolate);
+      materialize_false_id_ = GetNextId(isolate);
+    }
   }
 
   DECLARE_NODE_TYPE(UnaryOperation)
@@ -1339,10 +1311,18 @@
   Expression* expression() const { return expression_; }
   virtual int position() const { return pos_; }
 
+  int MaterializeTrueId() { return materialize_true_id_; }
+  int MaterializeFalseId() { return materialize_false_id_; }
+
  private:
   Token::Value op_;
   Expression* expression_;
   int pos_;
+
+  // For unary not (Token::NOT), the AST ids where true and false will
+  // actually be materialized, respectively.
+  int materialize_true_id_;
+  int materialize_false_id_;
 };
 
 
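The ast.h change above replaces per-class virtual As##type overrides with non-virtual Is##type/As##type helpers driven by a single node_type() tag. A minimal sketch of that macro pattern (two invented node kinds, not the real AST_NODE_LIST):

#include <cstdio>

class Literal;
class Block;

class AstNode {
 public:
  enum Type { kLiteral, kBlock };
  virtual ~AstNode() {}
  virtual Type node_type() const = 0;

// One tag comparison replaces a virtual dispatch per node class.
#define DECLARE_NODE_FUNCTIONS(type)                                   \
  bool Is##type() { return node_type() == AstNode::k##type; }          \
  type* As##type() {                                                   \
    return Is##type() ? reinterpret_cast<type*>(this) : nullptr;       \
  }
  DECLARE_NODE_FUNCTIONS(Literal)
  DECLARE_NODE_FUNCTIONS(Block)
#undef DECLARE_NODE_FUNCTIONS
};

class Literal : public AstNode {
 public:
  explicit Literal(int value) : value_(value) {}
  virtual Type node_type() const { return kLiteral; }
  int value() const { return value_; }
 private:
  int value_;
};

class Block : public AstNode {
 public:
  virtual Type node_type() const { return kBlock; }
};

int main() {
  Literal lit(42);
  AstNode* node = &lit;
  if (node->IsLiteral()) std::printf("literal %d\n", node->AsLiteral()->value());
  std::printf("AsBlock is null: %d\n", node->AsBlock() == nullptr);
}
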
diff --git a/src/atomicops_internals_mips_gcc.h b/src/atomicops_internals_mips_gcc.h
index 5113de2..9498fd7 100644
--- a/src/atomicops_internals_mips_gcc.h
+++ b/src/atomicops_internals_mips_gcc.h
@@ -30,7 +30,7 @@
 #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
 namespace v8 {
 namespace internal {
@@ -48,16 +48,19 @@
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  Atomic32 prev;
-  __asm__ __volatile__("1:\n"
-                       "ll %0, %1\n"  // prev = *ptr
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
                        "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
-                       "nop\n"  // delay slot nop
-                       "sc %2, %1\n"  // *ptr = new_value (with atomic check)
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                        "beqz %2, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
                        "2:\n"
-                       : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                        : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                        : "memory");
   return prev;
@@ -68,12 +71,15 @@
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 temp, old;
-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %1, %2\n"  // old = *ptr
                        "move %0, %3\n"  // temp = new_value
                        "sc %0, %2\n"  // *ptr = temp (with atomic check)
                        "beqz %0, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                        : "r" (new_value), "m" (*ptr)
                        : "memory");
@@ -87,13 +93,15 @@
                                           Atomic32 increment) {
   Atomic32 temp, temp2;
 
-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %0, %2\n"  // temp = *ptr
-                       "addu %0, %3\n"  // temp = temp + increment
-                       "move %1, %0\n"  // temp2 = temp
-                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
-                       "beqz %0, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                        : "Ir" (increment), "m" (*ptr)
                        : "memory");
@@ -103,6 +111,7 @@
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
+  ATOMICOPS_COMPILER_BARRIER();
   Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
   ATOMICOPS_COMPILER_BARRIER();
   return res;
@@ -117,16 +126,19 @@
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   ATOMICOPS_COMPILER_BARRIER();
-  return x;
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   ATOMICOPS_COMPILER_BARRIER();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }
 
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -134,7 +146,7 @@
 }
 
 inline void MemoryBarrier() {
-  ATOMICOPS_COMPILER_BARRIER();
+  __asm__ __volatile__("sync" : : : "memory");
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
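The atomicops changes above tighten the MIPS ll/sc loops and make both the acquire and release compare-and-swap variants bracket the primitive with compiler barriers, while MemoryBarrier() alone now emits a hardware sync. A sketch of that barrier placement, substituting a GCC/Clang __sync builtin for the hand-written ll/sc loop (illustrative only, not the V8 code):

#include <cstdint>
#include <cstdio>

#define COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

typedef int32_t Atomic32;

// Stand-in for the hand-written ll/sc retry loop in the patch.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  COMPILER_BARRIER();  // keep earlier accesses from sinking below the CAS
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  COMPILER_BARRIER();  // keep later accesses from hoisting above it
  return res;
}

int main() {
  volatile Atomic32 cell = 1;
  Atomic32 prev = Acquire_CompareAndSwap(&cell, 1, 2);
  std::printf("prev=%d cell=%d\n", prev, (int)cell);  // prev=1 cell=2
}
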
diff --git a/src/builtins.cc b/src/builtins.cc
index e758b9a..f252911 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -772,6 +772,13 @@
 
   // Set the length.
   result_array->set_length(Smi::FromInt(result_len));
+
+  // Set the ElementsKind.
+  ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
+  if (result_array->GetElementsKind() != elements_kind) {
+    MaybeObject* maybe = result_array->TransitionElementsKind(elements_kind);
+    if (maybe->IsFailure()) return maybe;
+  }
   return result_array;
 }
 
@@ -865,6 +872,13 @@
 
     // Set the length.
     result_array->set_length(Smi::FromInt(actual_delete_count));
+
+    // Set the ElementsKind.
+    ElementsKind elements_kind = array->GetElementsKind();
+    if (result_array->GetElementsKind() != elements_kind) {
+      MaybeObject* maybe = result_array->TransitionElementsKind(elements_kind);
+      if (maybe->IsFailure()) return maybe;
+    }
   }
 
   int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
diff --git a/src/code-stubs.h b/src/code-stubs.h
index fc7000b..d49ff1d 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -58,10 +58,8 @@
   V(FastNewContext)                      \
   V(FastNewBlockContext)                 \
   V(FastCloneShallowArray)               \
-  V(RevertToNumber)                      \
   V(ToBoolean)                           \
   V(ToNumber)                            \
-  V(CounterOp)                           \
   V(ArgumentsAccess)                     \
   V(RegExpConstructResult)               \
   V(NumberToString)                      \
@@ -71,7 +69,8 @@
   V(KeyedStoreElement)                   \
   V(DebuggerStatement)                   \
   V(StringDictionaryLookup)              \
-  V(ElementsTransitionAndStore)
+  V(ElementsTransitionAndStore)          \
+  V(StoreArrayLiteralElement)
 
 // List of code stubs only used on ARM platforms.
 #ifdef V8_TARGET_ARCH_ARM
@@ -1064,6 +1063,20 @@
   DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
 };
 
+
+class StoreArrayLiteralElementStub : public CodeStub {
+ public:
+  explicit StoreArrayLiteralElementStub() {}
+
+ private:
+  Major MajorKey() { return StoreArrayLiteralElement; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODE_STUBS_H_
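The new StoreArrayLiteralElementStub above only has to be added to the CODE_STUB_LIST X-macro and given a MajorKey to become a first-class stub. A toy version of the X-macro technique (a three-entry list, not the real one):

#include <cstdio>

#define CODE_STUB_LIST(V)  \
  V(ToBoolean)             \
  V(ToNumber)              \
  V(StoreArrayLiteralElement)

// Each V(Name) entry expands into both the enum and the name table, so a
// single list edit registers the stub everywhere it is enumerated.
enum Major {
#define DEF_ENUM(name) name,
  CODE_STUB_LIST(DEF_ENUM)
#undef DEF_ENUM
  NUMBER_OF_STUBS
};

const char* MajorName(Major key) {
  switch (key) {
#define DEF_CASE(name) case name: return #name;
    CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
    default: return "unknown";
  }
}

int main() {
  std::printf("%s = %d\n", MajorName(StoreArrayLiteralElement),
              StoreArrayLiteralElement);
}
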
diff --git a/src/collection.js b/src/collection.js
index 4e45885..12ff893 100644
--- a/src/collection.js
+++ b/src/collection.js
@@ -32,6 +32,11 @@
 
 //-------------------------------------------------------------------
 
+// Global sentinel to be used instead of undefined keys, which are not
+// supported internally but required for Harmony sets and maps.
+var undefined_sentinel = {};
+
+
 function SetConstructor() {
   if (%_IsConstructCall()) {
     %SetInitialize(this);
@@ -42,16 +47,25 @@
 
 
 function SetAdd(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
   return %SetAdd(this, key);
 }
 
 
 function SetHas(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
   return %SetHas(this, key);
 }
 
 
 function SetDelete(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
   return %SetDelete(this, key);
 }
 
@@ -66,21 +80,33 @@
 
 
 function MapGet(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
   return %MapGet(this, key);
 }
 
 
 function MapSet(key, value) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
   return %MapSet(this, key, value);
 }
 
 
 function MapHas(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
   return !IS_UNDEFINED(%MapGet(this, key));
 }
 
 
 function MapDelete(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
   if (!IS_UNDEFINED(%MapGet(this, key))) {
     %MapSet(this, key, void 0);
     return true;
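For the sentinel trick used in collection.js, here is the same idea standalone in C++: a key the backing store cannot represent is normalized to the address of a private object on every operation, so insert, lookup, and delete all stay consistent (the container and key type are illustrative, not V8 internals):

#include <cstdio>
#include <map>

struct Sentinel {};
static Sentinel undefined_sentinel;  // unique address stands in for undefined

static const void* Normalize(const void* key) {
  return key == nullptr ? &undefined_sentinel : key;  // nullptr models undefined
}

int main() {
  std::map<const void*, int> backing;  // identity-keyed, like a harmony Map
  int some_object = 0;                 // an ordinary key with a real address

  backing[Normalize(nullptr)] = 1;       // map.set(undefined, 1)
  backing[Normalize(&some_object)] = 2;  // map.set(obj, 2)
  std::printf("get(undefined) = %d\n", backing[Normalize(nullptr)]);  // 1
  backing.erase(Normalize(nullptr));     // map.delete(undefined)
  std::printf("has(undefined) = %d\n",
              (int)backing.count(Normalize(nullptr)));                // 0
}
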
diff --git a/src/compiler.cc b/src/compiler.cc
index 88db467..4a5f399 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -326,8 +326,7 @@
   // the compilation info is set if compilation succeeded.
   bool succeeded = MakeCode(info);
   if (!info->shared_info().is_null()) {
-    Handle<SerializedScopeInfo> scope_info =
-        SerializedScopeInfo::Create(info->scope());
+    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
     info->shared_info()->set_scope_info(*scope_info);
   }
   return succeeded;
@@ -395,7 +394,7 @@
           lit->name(),
           lit->materialized_literal_count(),
           info->code(),
-          SerializedScopeInfo::Create(info->scope()));
+          ScopeInfo::Create(info->scope()));
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
   Compiler::SetFunctionInfo(result, lit, true, script);
@@ -623,7 +622,7 @@
       RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
 
       if (info->IsOptimizing()) {
-        ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
+        ASSERT(shared->scope_info() != ScopeInfo::Empty());
         function->ReplaceCode(*code);
       } else {
         // Update the shared function info with the compiled code and the
@@ -631,8 +630,7 @@
         // info initialization is important since set_scope_info might
         // trigger a GC, causing the ASSERT below to be invalid if the code
         // was flushed. By setting the code object last we avoid this.
-        Handle<SerializedScopeInfo> scope_info =
-            SerializedScopeInfo::Create(info->scope());
+        Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
         shared->set_scope_info(*scope_info);
         shared->set_code(*code);
         if (!function.is_null()) {
@@ -695,7 +693,7 @@
   bool allow_lazy = literal->AllowsLazyCompilation() &&
       !LiveEditFunctionTracker::IsActive(info.isolate());
 
-  Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+  Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
 
   // Generate code
   if (FLAG_lazy && allow_lazy) {
@@ -704,7 +702,7 @@
   } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
              (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
     ASSERT(!info.code().is_null());
-    scope_info = SerializedScopeInfo::Create(info.scope());
+    scope_info = ScopeInfo::Create(info.scope());
   } else {
     return Handle<SharedFunctionInfo>::null();
   }
diff --git a/src/contexts.cc b/src/contexts.cc
index b25ffac..aee963b 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -137,16 +137,17 @@
     if (context->IsFunctionContext() || context->IsBlockContext()) {
       // Use serialized scope information of functions and blocks to search
       // for the context index.
-      Handle<SerializedScopeInfo> scope_info;
+      Handle<ScopeInfo> scope_info;
       if (context->IsFunctionContext()) {
-        scope_info = Handle<SerializedScopeInfo>(
+        scope_info = Handle<ScopeInfo>(
             context->closure()->shared()->scope_info(), isolate);
       } else {
-        scope_info = Handle<SerializedScopeInfo>(
-            SerializedScopeInfo::cast(context->extension()), isolate);
+        scope_info = Handle<ScopeInfo>(
+            ScopeInfo::cast(context->extension()), isolate);
       }
       VariableMode mode;
-      int slot_index = scope_info->ContextSlotIndex(*name, &mode);
+      InitializationFlag init_flag;
+      int slot_index = scope_info->ContextSlotIndex(*name, &mode, &init_flag);
       ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
       if (slot_index >= 0) {
         if (FLAG_trace_contexts) {
@@ -168,15 +169,19 @@
             break;
           case LET:
             *attributes = NONE;
-            *binding_flags = MUTABLE_CHECK_INITIALIZED;
+            *binding_flags = (init_flag == kNeedsInitialization)
+                ? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
             break;
           case CONST:
             *attributes = READ_ONLY;
-            *binding_flags = IMMUTABLE_CHECK_INITIALIZED;
+            *binding_flags = (init_flag == kNeedsInitialization)
+                ? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
             break;
           case CONST_HARMONY:
             *attributes = READ_ONLY;
-            *binding_flags = IMMUTABLE_CHECK_INITIALIZED_HARMONY;
+            *binding_flags = (init_flag == kNeedsInitialization)
+                ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
+                IMMUTABLE_IS_INITIALIZED_HARMONY;
             break;
           case DYNAMIC:
           case DYNAMIC_GLOBAL:
@@ -250,10 +255,10 @@
     ASSERT(context->IsFunctionContext());
 
     // Check non-parameter locals.
-    Handle<SerializedScopeInfo> scope_info(
-        context->closure()->shared()->scope_info());
+    Handle<ScopeInfo> scope_info(context->closure()->shared()->scope_info());
     VariableMode mode;
-    int index = scope_info->ContextSlotIndex(*name, &mode);
+    InitializationFlag init_flag;
+    int index = scope_info->ContextSlotIndex(*name, &mode, &init_flag);
     ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
     if (index >= 0) return false;
 
@@ -262,7 +267,7 @@
     if (param_index >= 0) return false;
 
     // Check context only holding the function name variable.
-    index = scope_info->FunctionContextSlotIndex(*name, NULL);
+    index = scope_info->FunctionContextSlotIndex(*name, &mode);
     if (index >= 0) return false;
     context = context->previous();
   }
@@ -279,9 +284,7 @@
   Context* context = this;
   while (!context->IsGlobalContext()) {
     if (context->IsFunctionContext()) {
-      Handle<SerializedScopeInfo> scope_info(
-          context->closure()->shared()->scope_info());
-      if (scope_info->CallsEval() && !scope_info->IsStrictMode()) {
+      if (context->closure()->shared()->scope_info()->CallsNonStrictEval()) {
         // No need to go further since the answers will not change from
         // here.
         *outer_scope_calls_non_strict_eval = true;
diff --git a/src/date.js b/src/date.js
index ccefce5..8d7d5d8 100644
--- a/src/date.js
+++ b/src/date.js
@@ -294,8 +294,8 @@
 }
 
 
-var ymd_from_time_cache = [$NaN, $NaN, $NaN];
-var ymd_from_time_cached_time = $NaN;
+var ymd_from_time_cache = [1970, 0, 1];
+var ymd_from_time_cached_time = 0;
 
 function YearFromTime(t) {
   if (t !== ymd_from_time_cached_time) {
@@ -351,13 +351,12 @@
   date = TO_INTEGER_MAP_MINUS_ZERO(date);
 
   if (year < kMinYear || year > kMaxYear ||
-      month < kMinMonth || month > kMaxMonth ||
-      date < kMinDate || date > kMaxDate) {
+      month < kMinMonth || month > kMaxMonth) {
     return $NaN;
   }
 
-  // Now we rely on year, month and date being SMIs.
-  return %DateMakeDay(year, month, date);
+  // Now we rely on year and month being SMIs.
+  return %DateMakeDay(year, month) + date - 1;
 }
 
 
@@ -978,9 +977,10 @@
 }
 
 
+// ECMA 262 - 15.9.5.43
 function DateToISOString() {
   var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
   var year = this.getUTCFullYear();
   var year_string;
   if (year >= 0 && year <= 9999) {
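The MakeDay change above moves the date offset out of the runtime call: %DateMakeDay now only maps (year, month) to the day number of the first of that month, and the JS side adds date - 1, which also lets out-of-range date values roll over as plain day arithmetic. A worked standalone version (days_from_civil is the usual civil-calendar formula, used here only for demonstration):

#include <cstdio>

// Days since 1970-01-01 for a civil date (Howard Hinnant's algorithm).
long days_from_civil(long y, unsigned m, unsigned d) {
  y -= m <= 2;
  const long era = (y >= 0 ? y : y - 399) / 400;
  const unsigned yoe = static_cast<unsigned>(y - era * 400);            // [0, 399]
  const unsigned doy = (153 * (m + (m > 2 ? -3 : 9)) + 2) / 5 + d - 1;  // [0, 365]
  const unsigned doe = yoe * 365 + yoe / 4 - yoe / 100 + doy;           // [0, 146096]
  return era * 146097 + static_cast<long>(doe) - 719468;
}

// Mimics %DateMakeDay(year, month): day number of the 1st of the month.
long MakeDayOfFirst(long year, unsigned month) {  // month is 0-based
  return days_from_civil(year, month + 1, 1);
}

int main() {
  // MakeDay(2011, 0) + 32 - 1: "January 32nd" rolls over to Feb 1, 2011.
  long day = MakeDayOfFirst(2011, 0) + 32 - 1;
  std::printf("%ld vs Feb 1: %ld\n", day, days_from_civil(2011, 2, 1));
}
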
diff --git a/src/debug.cc b/src/debug.cc
index dc9f297..88149d8 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -2621,7 +2621,8 @@
                                   v8::Debug::ClientData* client_data) {
   Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
   v8::Debug::EventCallback2 callback =
-      FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->address());
+      FUNCTION_CAST<v8::Debug::EventCallback2>(
+          callback_obj->foreign_address());
   EventDetailsImpl event_details(
       event,
       Handle<JSObject>::cast(exec_state),
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 33580a1..284676c 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -369,7 +369,20 @@
   }
 
   double GetDoubleFrameSlot(unsigned offset) {
-    return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
+    intptr_t* ptr = GetFrameSlotPointer(offset);
+#if V8_TARGET_ARCH_MIPS
+    // Prevent gcc from using load-double (mips ldc1) on (possibly)
+    // non-64-bit aligned double. Uses two lwc1 instructions.
+    union conversion {
+      double d;
+      uint32_t u[2];
+    } c;
+    c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
+    c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
+    return c.d;
+#else
+    return *reinterpret_cast<double*>(ptr);
+#endif
   }
 
   void SetFrameSlot(unsigned offset, intptr_t value) {
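The GetDoubleFrameSlot change above exists because a direct double load on MIPS (ldc1) requires 8-byte alignment, while an optimized frame slot may only be 4-byte aligned; going through a union of two 32-bit words forces word-sized loads. A standalone demonstration of the same technique (the buffer layout is contrived for the example):

#include <cstdint>
#include <cstdio>
#include <cstring>

double ReadPossiblyUnalignedDouble(const void* p) {
  union {
    double d;
    uint32_t u[2];
  } c;
  // Two 4-byte copies, like the two lwc1 loads in the patch. memcpy keeps
  // this well-defined even where direct word loads would also trap.
  std::memcpy(&c.u[0], p, sizeof(uint32_t));
  std::memcpy(&c.u[1], static_cast<const char*>(p) + sizeof(uint32_t),
              sizeof(uint32_t));
  return c.d;
}

int main() {
  // Place a double at a 4-byte (but deliberately not 8-byte) offset.
  alignas(8) char buffer[16] = {};
  double value = 3.5;
  std::memcpy(buffer + 4, &value, sizeof(value));
  std::printf("%g\n", ReadPossiblyUnalignedDouble(buffer + 4));  // 3.5
}
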
diff --git a/src/factory.cc b/src/factory.cc
index 15f640e..88684a4 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -303,7 +303,7 @@
 Handle<Context> Factory::NewBlockContext(
     Handle<JSFunction> function,
     Handle<Context> previous,
-    Handle<SerializedScopeInfo> scope_info) {
+    Handle<ScopeInfo> scope_info) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateBlockContext(*function,
@@ -766,11 +766,11 @@
 }
 
 
-Handle<SerializedScopeInfo> Factory::NewSerializedScopeInfo(int length) {
+Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateSerializedScopeInfo(length),
-      SerializedScopeInfo);
+      isolate()->heap()->AllocateScopeInfo(length),
+      ScopeInfo);
 }
 
 
@@ -985,7 +985,7 @@
     Handle<String> name,
     int number_of_literals,
     Handle<Code> code,
-    Handle<SerializedScopeInfo> scope_info) {
+    Handle<ScopeInfo> scope_info) {
   Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
   shared->set_code(*code);
   shared->set_scope_info(*scope_info);
diff --git a/src/factory.h b/src/factory.h
index 2073ce3..158db5b 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -172,7 +172,7 @@
   // Create a 'block' context.
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
-                                  Handle<SerializedScopeInfo> scope_info);
+                                  Handle<ScopeInfo> scope_info);
 
   // Return the Symbol matching the passed in string.
   Handle<String> SymbolFromString(Handle<String> value);
@@ -291,7 +291,7 @@
       Handle<Context> context,
       PretenureFlag pretenure = TENURED);
 
-  Handle<SerializedScopeInfo> NewSerializedScopeInfo(int length);
+  Handle<ScopeInfo> NewScopeInfo(int length);
 
   Handle<Code> NewCode(const CodeDesc& desc,
                        Code::Flags flags,
@@ -409,7 +409,7 @@
       Handle<String> name,
       int number_of_literals,
       Handle<Code> code,
-      Handle<SerializedScopeInfo> scope_info);
+      Handle<ScopeInfo> scope_info);
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
 
   Handle<JSMessageObject> NewJSMessageObject(
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index ee6ef01..9f11616 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -114,11 +114,7 @@
             "Optimize object size, Array shift, DOM strings and string +")
 
 // Flags for Crankshaft.
-#ifdef V8_TARGET_ARCH_MIPS
-  DEFINE_bool(crankshaft, false, "use crankshaft")
-#else
-  DEFINE_bool(crankshaft, true, "use crankshaft")
-#endif
+DEFINE_bool(crankshaft, true, "use crankshaft")
 DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
 DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
 DEFINE_bool(build_lithium, true, "use lithium chunk builder")
@@ -326,7 +322,8 @@
 
 // simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
+DEFINE_bool(check_icache, false,
+            "Check icache flushes in ARM and MIPS simulator")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
 DEFINE_int(sim_stack_alignment, 8,
            "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
@@ -433,7 +430,6 @@
 DEFINE_bool(print_builtin_json_ast, false,
             "print source AST for builtins as JSON")
 DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
-DEFINE_bool(verify_stack_height, false, "verify stack height tracing on ia32")
 
 // compiler.cc
 DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
diff --git a/src/frames.cc b/src/frames.cc
index 7c4c573..9fd0042 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1002,11 +1002,15 @@
   if (IsConstructor()) accumulator->Add("new ");
   accumulator->PrintFunction(function, receiver, &code);
 
-  Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+  // Get scope information for nicer output, if possible. If code is NULL, or
+  // doesn't contain scope info, scope_info will return 0 for the number of
+  // parameters, stack local variables, context local variables, stack slots,
+  // or context slots.
+  Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
 
   if (function->IsJSFunction()) {
     Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
-    scope_info = Handle<SerializedScopeInfo>(shared->scope_info());
+    scope_info = Handle<ScopeInfo>(shared->scope_info());
     Object* script_obj = shared->script();
     if (script_obj->IsScript()) {
       Handle<Script> script(Script::cast(script_obj));
@@ -1031,11 +1035,6 @@
 
   accumulator->Add("(this=%o", receiver);
 
-  // Get scope information for nicer output, if possible. If code is
-  // NULL, or doesn't contain scope info, info will return 0 for the
-  // number of parameters, stack slots, or context slots.
-  ScopeInfo<PreallocatedStorage> info(*scope_info);
-
   // Print the parameters.
   int parameters_count = ComputeParametersCount();
   for (int i = 0; i < parameters_count; i++) {
@@ -1043,8 +1042,8 @@
     // If we have a name for the parameter we print it. Nameless
     // parameters are either because we have more actual parameters
     // than formal parameters or because we have no scope information.
-    if (i < info.number_of_parameters()) {
-      accumulator->PrintName(*info.parameter_name(i));
+    if (i < scope_info->ParameterCount()) {
+      accumulator->PrintName(scope_info->ParameterName(i));
       accumulator->Add("=");
     }
     accumulator->Add("%o", GetParameter(i));
@@ -1062,8 +1061,8 @@
   accumulator->Add(" {\n");
 
   // Compute the number of locals and expression stack elements.
-  int stack_locals_count = info.number_of_stack_slots();
-  int heap_locals_count = info.number_of_context_slots();
+  int stack_locals_count = scope_info->StackLocalCount();
+  int heap_locals_count = scope_info->ContextLocalCount();
   int expressions_count = ComputeExpressionsCount();
 
   // Print stack-allocated local variables.
@@ -1072,7 +1071,7 @@
   }
   for (int i = 0; i < stack_locals_count; i++) {
     accumulator->Add("  var ");
-    accumulator->PrintName(*info.stack_slot_name(i));
+    accumulator->PrintName(scope_info->StackLocalName(i));
     accumulator->Add(" = ");
     if (i < expressions_count) {
       accumulator->Add("%o", GetExpression(i));
@@ -1089,16 +1088,16 @@
   }
 
   // Print heap-allocated local variables.
-  if (heap_locals_count > Context::MIN_CONTEXT_SLOTS) {
+  if (heap_locals_count > 0) {
     accumulator->Add("  // heap-allocated locals\n");
   }
-  for (int i = Context::MIN_CONTEXT_SLOTS; i < heap_locals_count; i++) {
+  for (int i = 0; i < heap_locals_count; i++) {
     accumulator->Add("  var ");
-    accumulator->PrintName(*info.context_slot_name(i));
+    accumulator->PrintName(scope_info->ContextLocalName(i));
     accumulator->Add(" = ");
     if (context != NULL) {
       if (i < context->length()) {
-        accumulator->Add("%o", context->get(i));
+        accumulator->Add("%o", context->get(Context::MIN_CONTEXT_SLOTS + i));
       } else {
         accumulator->Add(
             "// warning: missing context slot - inconsistent frame?");
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 27c509f..1589b86 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -410,27 +410,24 @@
 
 void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
   __ push(reg);
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Register reg) const {
   // For simplicity we always test the accumulator register.
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
 
 void FullCodeGenerator::EffectContext::PlugTOS() const {
   __ Drop(1);
-  codegen()->decrement_stack_height();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
   __ pop(result_register());
-  codegen()->decrement_stack_height();
 }
 
 
@@ -441,8 +438,7 @@
 void FullCodeGenerator::TestContext::PlugTOS() const {
   // For simplicity we always test the accumulator register.
   __ pop(result_register());
-  codegen()->decrement_stack_height();
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -656,14 +652,13 @@
 }
 
 
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
-  ZoneList<Expression*>* args = node->arguments();
-  const Runtime::Function* function = node->function();
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+  const Runtime::Function* function = expr->function();
   ASSERT(function != NULL);
   ASSERT(function->intrinsic_type == Runtime::INLINE);
   InlineFunctionGenerator generator =
       FindInlineFunctionGenerator(function->function_id);
-  ((*this).*(generator))(args);
+  ((*this).*(generator))(expr);
 }
 
 
@@ -680,11 +675,25 @@
 }
 
 
+void FullCodeGenerator::VisitInDuplicateContext(Expression* expr) {
+  if (context()->IsEffect()) {
+    VisitForEffect(expr);
+  } else if (context()->IsAccumulatorValue()) {
+    VisitForAccumulatorValue(expr);
+  } else if (context()->IsStackValue()) {
+    VisitForStackValue(expr);
+  } else if (context()->IsTest()) {
+    const TestContext* test = TestContext::cast(context());
+    VisitForControl(expr, test->true_label(), test->false_label(),
+                    test->fall_through());
+  }
+}
+
+
 void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
   Comment cmnt(masm_, "[ Comma");
   VisitForEffect(expr->left());
-  if (context()->IsTest()) ForwardBailoutToChild(expr);
-  VisitInCurrentContext(expr->right());
+  VisitInDuplicateContext(expr->right());
 }
 
 
@@ -706,7 +715,6 @@
     }
     PrepareForBailoutForId(right_id, NO_REGISTERS);
     __ bind(&eval_right);
-    ForwardBailoutToChild(expr);
 
   } else if (context()->IsAccumulatorValue()) {
     VisitForAccumulatorValue(left);
@@ -714,7 +722,6 @@
     // case we need it.
     __ push(result_register());
     Label discard, restore;
-    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     if (is_logical_and) {
       DoTest(left, &discard, &restore, &restore);
     } else {
@@ -733,7 +740,6 @@
     // case we need it.
     __ push(result_register());
     Label discard;
-    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     if (is_logical_and) {
       DoTest(left, &discard, &done, &discard);
     } else {
@@ -755,7 +761,7 @@
     __ bind(&eval_right);
   }
 
-  VisitInCurrentContext(right);
+  VisitInDuplicateContext(right);
   __ bind(&done);
 }
 
@@ -782,34 +788,6 @@
 }
 
 
-void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
-  if (!info_->HasDeoptimizationSupport()) return;
-  ASSERT(context()->IsTest());
-  ASSERT(expr == forward_bailout_stack_->expr());
-  forward_bailout_pending_ = forward_bailout_stack_;
-}
-
-
-void FullCodeGenerator::VisitInCurrentContext(Expression* expr) {
-  if (context()->IsTest()) {
-    ForwardBailoutStack stack(expr, forward_bailout_pending_);
-    ForwardBailoutStack* saved = forward_bailout_stack_;
-    forward_bailout_pending_ = NULL;
-    forward_bailout_stack_ = &stack;
-    Visit(expr);
-    forward_bailout_stack_ = saved;
-  } else {
-    ASSERT(forward_bailout_pending_ == NULL);
-    Visit(expr);
-    State state = context()->IsAccumulatorValue() ? TOS_REG : NO_REGISTERS;
-    PrepareForBailout(expr, state);
-    // Forwarding bailouts to children is a one shot operation. It should have
-    // been processed at this point.
-    ASSERT(forward_bailout_pending_ == NULL);
-  }
-}
-
-
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
   NestedBlock nested_block(this, stmt);
@@ -820,9 +798,8 @@
   if (stmt->block_scope() != NULL) {
     { Comment cmnt(masm_, "[ Extend block context");
       scope_ = stmt->block_scope();
-      Handle<SerializedScopeInfo> scope_info = scope_->GetSerializedScopeInfo();
-      int heap_slots =
-          scope_info->NumberOfContextSlots() - Context::MIN_CONTEXT_SLOTS;
+      Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+      int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
       __ Push(scope_info);
       PushFunctionArgumentForContextAllocation();
       if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
@@ -979,7 +956,6 @@
   VisitForStackValue(stmt->expression());
   PushFunctionArgumentForContextAllocation();
   __ CallRuntime(Runtime::kPushWithContext, 2);
-  decrement_stack_height();
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 
   { WithOrCatch body(this);
@@ -1149,13 +1125,10 @@
   // Try block code. Sets up the exception handler chain.
   __ bind(&try_handler_setup);
   {
-    const int delta = StackHandlerConstants::kSize / kPointerSize;
     TryCatch try_block(this);
     __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
-    increment_stack_height(delta);
     Visit(stmt->try_block());
     __ PopTryHandler();
-    decrement_stack_height(delta);
   }
   __ bind(&done);
 }
@@ -1187,7 +1160,6 @@
   // cooked before GC.
   Label finally_entry;
   Label try_handler_setup;
-  const int original_stack_height = stack_height();
 
   // Set up the try-handler chain.
   // Jump to try-handler setup and try-block code. Use a call to put try-handler
@@ -1209,7 +1181,6 @@
     // Finally block implementation.
     Finally finally_block(this);
     EnterFinallyBlock();
-    set_stack_height(original_stack_height + Finally::kElementCount);
     Visit(stmt->finally_block());
     ExitFinallyBlock();  // Return to the calling code.
   }
@@ -1217,13 +1188,10 @@
   __ bind(&try_handler_setup);
   {
     // Setup try handler (stack pointer registers).
-    const int delta = StackHandlerConstants::kSize / kPointerSize;
     TryFinally try_block(this, &finally_entry);
     __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
-    set_stack_height(original_stack_height + delta);
     Visit(stmt->try_block());
     __ PopTryHandler();
-    set_stack_height(original_stack_height);
   }
   // Execute the finally block on the way out.  Clobber the unpredictable
   // value in the accumulator with one that's safe for GC.  The finally
@@ -1253,7 +1221,6 @@
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression(),
                         expr->then_expression_position());
-  int start_stack_height = stack_height();
   if (context()->IsTest()) {
     const TestContext* for_test = TestContext::cast(context());
     VisitForControl(expr->then_expression(),
@@ -1261,17 +1228,15 @@
                     for_test->false_label(),
                     NULL);
   } else {
-    VisitInCurrentContext(expr->then_expression());
+    VisitInDuplicateContext(expr->then_expression());
     __ jmp(&done);
   }
 
   PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
   __ bind(&false_case);
-  set_stack_height(start_stack_height);
-  if (context()->IsTest()) ForwardBailoutToChild(expr);
   SetExpressionPosition(expr->else_expression(),
                         expr->else_expression_position());
-  VisitInCurrentContext(expr->else_expression());
+  VisitInDuplicateContext(expr->else_expression());
   // If control flow falls through Visit, merge it with true case here.
   if (!context()->IsTest()) {
     __ bind(&done);
@@ -1308,11 +1273,8 @@
 
 void FullCodeGenerator::VisitThrow(Throw* expr) {
   Comment cmnt(masm_, "[ Throw");
-  // Throw has no effect on the stack height or the current expression context.
-  // Usually the expression context is null, because throw is a statement.
   VisitForStackValue(expr->exception());
   __ CallRuntime(Runtime::kThrow, 1);
-  decrement_stack_height();
   // Never returns here.
 }
 
@@ -1332,7 +1294,7 @@
   Expression *sub_expr;
   Handle<String> check;
   if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
-    EmitLiteralCompareTypeof(sub_expr, check);
+    EmitLiteralCompareTypeof(expr, sub_expr, check);
     return true;
   }
 
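The full-codegen simplification above removes the forward-bailout stack: the per-context VisitForX helpers now just construct a scoped context object, visit, and record the bailout state themselves, instead of routing through VisitInCurrentContext. A minimal sketch of that scoped-context pattern (the classes and states are stand-ins, not the real FullCodeGenerator):

#include <cstdio>

enum State { NO_REGISTERS, TOS_REG };
struct Expression { const char* name; };

class Codegen {
 public:
  Codegen() : context_(nullptr) {}

  // RAII helper: installs itself as the active expression context for the
  // duration of one Visit, then restores the previous one.
  class EffectContext {
   public:
    explicit EffectContext(Codegen* codegen)
        : codegen_(codegen), old_(codegen->context_) {
      codegen->context_ = this;
    }
    ~EffectContext() { codegen_->context_ = old_; }
   private:
    Codegen* codegen_;
    EffectContext* old_;
  };

  void VisitForEffect(Expression* expr) {
    EffectContext context(this);
    Visit(expr);
    PrepareForBailout(expr, NO_REGISTERS);  // bailout recorded right here
  }

 private:
  void Visit(Expression* expr) { std::printf("visit %s\n", expr->name); }
  void PrepareForBailout(Expression*, State s) {
    std::printf("bailout state %d\n", s);
  }
  EffectContext* context_;
};

int main() {
  Expression e = {"comma-left"};
  Codegen cg;
  cg.VisitForEffect(&e);
}
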
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 9132502..799f96d 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -83,12 +83,9 @@
         scope_(NULL),
         nesting_stack_(NULL),
         loop_depth_(0),
-        stack_height_(0),
         context_(NULL),
         bailout_entries_(0),
-        stack_checks_(2),  // There's always at least one.
-        forward_bailout_stack_(NULL),
-        forward_bailout_pending_(NULL) {
+        stack_checks_(2) {  // There's always at least one.
   }
 
   static bool MakeCode(CompilationInfo* info);
@@ -276,27 +273,8 @@
     }
   };
 
-  // The forward bailout stack keeps track of the expressions that can
-  // bail out to just before the control flow is split in a child
-  // node. The stack elements are linked together through the parent
-  // link when visiting expressions in test contexts after requesting
-  // bailout in child forwarding.
-  class ForwardBailoutStack BASE_EMBEDDED {
-   public:
-    ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
-        : expr_(expr), parent_(parent) { }
-
-    Expression* expr() const { return expr_; }
-    ForwardBailoutStack* parent() const { return parent_; }
-
-   private:
-    Expression* const expr_;
-    ForwardBailoutStack* const parent_;
-  };
-
   // Type of a member function that generates inline code for a native function.
-  typedef void (FullCodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
+  typedef void (FullCodeGenerator::*InlineFunctionGenerator)(CallRuntime* expr);
 
   static const InlineFunctionGenerator kInlineFunctionGenerators[];
 
@@ -357,23 +335,22 @@
   // need the write barrier if location is CONTEXT.
   MemOperand VarOperand(Variable* var, Register scratch);
 
-  // Forward the bailout responsibility for the given expression to
-  // the next child visited (which must be in a test context).
-  void ForwardBailoutToChild(Expression* expr);
-
   void VisitForEffect(Expression* expr) {
     EffectContext context(this);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    PrepareForBailout(expr, NO_REGISTERS);
   }
 
   void VisitForAccumulatorValue(Expression* expr) {
     AccumulatorValueContext context(this);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    PrepareForBailout(expr, TOS_REG);
   }
 
   void VisitForStackValue(Expression* expr) {
     StackValueContext context(this);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    PrepareForBailout(expr, NO_REGISTERS);
   }
 
   void VisitForControl(Expression* expr,
@@ -381,9 +358,14 @@
                        Label* if_false,
                        Label* fall_through) {
     TestContext context(this, expr, if_true, if_false, fall_through);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    // For test contexts, we prepare for bailout before branching, not at
+    // the end of the entire expression.  This happens as part of visiting
+    // the expression.
   }
 
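+  // Visits an expression in a fresh copy of the current expression context;
+  // used, e.g., for the arms of a conditional, which share one context.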
+  void VisitInDuplicateContext(Expression* expr);
+
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
   int DeclareGlobalsFlags();
@@ -395,7 +377,9 @@
 
   // Platform-specific code for comparing the type of a value with
   // a given literal string.
-  void EmitLiteralCompareTypeof(Expression* expr, Handle<String> check);
+  void EmitLiteralCompareTypeof(Expression* expr,
+                                Expression* sub_expr,
+                                Handle<String> check);
 
   // Platform-specific code for equality comparison with a nil-like value.
   void EmitLiteralCompareNil(CompareOperation* expr,
@@ -415,7 +399,7 @@
   // canonical JS true value so we will insert a (dead) test against true at
   // the actual bailout target from the optimized code. If not
   // should_normalize, the true and false labels are ignored.
-  void PrepareForBailoutBeforeSplit(State state,
+  void PrepareForBailoutBeforeSplit(Expression* expr,
                                     bool should_normalize,
                                     Label* if_true,
                                     Label* if_false);
@@ -450,7 +434,7 @@
   void EmitInlineRuntimeCall(CallRuntime* expr);
 
 #define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
-  void Emit##name(ZoneList<Expression*>* arguments);
+  void Emit##name(CallRuntime* expr);
   INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
   INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
 #undef EMIT_INLINE_RUNTIME_CALL
@@ -466,13 +450,8 @@
                                  Label* done);
   void EmitVariableLoad(VariableProxy* proxy);
 
-  enum ResolveEvalFlag {
-    SKIP_CONTEXT_LOOKUP,
-    PERFORM_CONTEXT_LOOKUP
-  };
-
   // Expects the arguments and the function to have been pushed already.
-  void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count);
+  void EmitResolvePossiblyDirectEval(int arg_count);
 
   // Platform-specific support for allocating a new closure based on
   // the given function info.
@@ -539,35 +518,6 @@
     loop_depth_--;
   }
 
-#if defined(V8_TARGET_ARCH_IA32)
-  int stack_height() { return stack_height_; }
-  void set_stack_height(int depth) { stack_height_ = depth; }
-  void increment_stack_height() { stack_height_++; }
-  void increment_stack_height(int delta) { stack_height_ += delta; }
-  void decrement_stack_height() {
-    if (FLAG_verify_stack_height) {
-      ASSERT(stack_height_ > 0);
-    }
-    stack_height_--;
-  }
-  void decrement_stack_height(int delta) {
-    stack_height_-= delta;
-    if (FLAG_verify_stack_height) {
-      ASSERT(stack_height_ >= 0);
-    }
-  }
-  // Call this function only if FLAG_verify_stack_height is true.
-  void verify_stack_height();  // Generates a runtime check of esp - ebp.
-#else
-  int stack_height() { return 0; }
-  void set_stack_height(int depth) {}
-  void increment_stack_height() {}
-  void increment_stack_height(int delta) {}
-  void decrement_stack_height() {}
-  void decrement_stack_height(int delta) {}
-  void verify_stack_height() {}
-#endif  // V8_TARGET_ARCH_IA32
-
   MacroAssembler* masm() { return masm_; }
 
   class ExpressionContext;
@@ -611,7 +561,6 @@
   void VisitComma(BinaryOperation* expr);
   void VisitLogicalExpression(BinaryOperation* expr);
   void VisitArithmeticExpression(BinaryOperation* expr);
-  void VisitInCurrentContext(Expression* expr);
 
   void VisitForTypeofValue(Expression* expr);
 
@@ -630,10 +579,6 @@
 
     virtual ~ExpressionContext() {
       codegen_->set_new_context(old_);
-      if (FLAG_verify_stack_height) {
-        ASSERT_EQ(expected_stack_height_, codegen()->stack_height());
-        codegen()->verify_stack_height();
-      }
     }
 
     Isolate* isolate() const { return codegen_->isolate(); }
@@ -687,7 +632,6 @@
     FullCodeGenerator* codegen() const { return codegen_; }
     MacroAssembler* masm() const { return masm_; }
     MacroAssembler* masm_;
-    int expected_stack_height_;  // The expected stack height esp - ebp on exit.
 
    private:
     const ExpressionContext* old_;
@@ -697,9 +641,7 @@
   class AccumulatorValueContext : public ExpressionContext {
    public:
     explicit AccumulatorValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) {
-      expected_stack_height_ = codegen->stack_height();
-    }
+        : ExpressionContext(codegen) { }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -720,9 +662,7 @@
   class StackValueContext : public ExpressionContext {
    public:
     explicit StackValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) {
-      expected_stack_height_ = codegen->stack_height() + 1;
-    }
+        : ExpressionContext(codegen) { }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -751,9 +691,7 @@
           condition_(condition),
           true_label_(true_label),
           false_label_(false_label),
-          fall_through_(fall_through) {
-      expected_stack_height_ = codegen->stack_height();
-    }
+          fall_through_(fall_through) { }
 
     static const TestContext* cast(const ExpressionContext* context) {
       ASSERT(context->IsTest());
@@ -790,10 +728,7 @@
   class EffectContext : public ExpressionContext {
    public:
     explicit EffectContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) {
-      expected_stack_height_ = codegen->stack_height();
-    }
-
+        : ExpressionContext(codegen) { }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -817,12 +752,9 @@
   Label return_label_;
   NestedStatement* nesting_stack_;
   int loop_depth_;
-  int stack_height_;
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BailoutEntry> stack_checks_;
-  ForwardBailoutStack* forward_bailout_stack_;
-  ForwardBailoutStack* forward_bailout_pending_;
 
   friend class NestedStatement;
 
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 68cb053..b386bed 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -1115,13 +1115,13 @@
       int context_slots = scope_info.number_of_context_slots();
       // The real slot ID is internal_slots + context_slot_id.
       int internal_slots = Context::MIN_CONTEXT_SLOTS;
-      int locals = scope_info.NumberOfLocals();
+      int locals = scope_info.LocalCount();
       int current_abbreviation = 4;
 
       for (int param = 0; param < params; ++param) {
         w->WriteULEB128(current_abbreviation++);
         w->WriteString(
-            *scope_info.parameter_name(param)->ToCString(DISALLOW_NULLS));
+            *scope_info.ParameterName(param)->ToCString(DISALLOW_NULLS));
         w->Write<uint32_t>(ty_offset);
         Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
         uintptr_t block_start = w->position();
@@ -1312,7 +1312,7 @@
       int context_slots = scope_info.number_of_context_slots();
       // The real slot ID is internal_slots + context_slot_id.
       int internal_slots = Context::MIN_CONTEXT_SLOTS;
-      int locals = scope_info.NumberOfLocals();
+      int locals = scope_info.LocalCount();
       int total_children =
           params + slots + context_slots + internal_slots + locals + 2;
 
diff --git a/src/globals.h b/src/globals.h
index 26a0e5f..f85f286 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -364,11 +364,7 @@
 // The Strict Mode (ECMA-262 5th edition, 4.2.2).
 enum StrictModeFlag {
   kNonStrictMode,
-  kStrictMode,
-  // This value is never used, but is needed to prevent GCC 4.5 from failing
-  // to compile when we assert that a flag is either kNonStrictMode or
-  // kStrictMode.
-  kInvalidStrictFlag
+  kStrictMode
 };
 
 
diff --git a/src/handles.cc b/src/handles.cc
index 62851f3..db8ce4d 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -516,8 +516,9 @@
   Handle<Object> cache = Utils::OpenHandle(*handle);
   JSValue* wrapper = JSValue::cast(*cache);
   Foreign* foreign = Script::cast(wrapper->value())->wrapper();
-  ASSERT(foreign->address() == reinterpret_cast<Address>(cache.location()));
-  foreign->set_address(0);
+  ASSERT(foreign->foreign_address() ==
+         reinterpret_cast<Address>(cache.location()));
+  foreign->set_foreign_address(0);
   Isolate* isolate = Isolate::Current();
   isolate->global_handles()->Destroy(cache.location());
   isolate->counters()->script_wrappers()->Decrement();
@@ -525,10 +526,10 @@
 
 
 Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
-  if (script->wrapper()->address() != NULL) {
+  if (script->wrapper()->foreign_address() != NULL) {
     // Return the script wrapper directly from the cache.
     return Handle<JSValue>(
-        reinterpret_cast<JSValue**>(script->wrapper()->address()));
+        reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
   }
   Isolate* isolate = Isolate::Current();
   // Construct a new script wrapper.
@@ -544,7 +545,8 @@
   Handle<Object> handle = isolate->global_handles()->Create(*result);
   isolate->global_handles()->MakeWeak(handle.location(), NULL,
                                       &ClearWrapperCache);
-  script->wrapper()->set_address(reinterpret_cast<Address>(handle.location()));
+  script->wrapper()->set_foreign_address(
+      reinterpret_cast<Address>(handle.location()));
   return result;
 }
 
diff --git a/src/hashmap.cc b/src/hashmap.cc
index 1422afd..37748a3 100644
--- a/src/hashmap.cc
+++ b/src/hashmap.cc
@@ -39,12 +39,6 @@
 Allocator HashMap::DefaultAllocator;
 
 
-HashMap::HashMap() {
-  allocator_ = NULL;
-  match_ = NULL;
-}
-
-
 HashMap::HashMap(MatchFun match,
                  Allocator* allocator,
                  uint32_t initial_capacity) {
diff --git a/src/hashmap.h b/src/hashmap.h
index 5c13212..4e6a454 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -50,11 +50,6 @@
 
   typedef bool (*MatchFun) (void* key1, void* key2);
 
-  // Dummy constructor.  This constructor doesn't set up the hash
-  // map properly so don't use it unless you have good reason (e.g.,
-  // you know that the HashMap will never be used).
-  HashMap();
-
   // initial_capacity is the size of the initial hash map;
   // it must be a power of 2 (and thus must not be 0).
   explicit HashMap(MatchFun match,
diff --git a/src/heap-inl.h b/src/heap-inl.h
index aaf2927..c065b73 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -571,11 +571,11 @@
 #ifdef DEBUG
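+  // The hole marks dead entries in the table; none should be left by the
+  // time we verify.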
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     ASSERT(heap_->InNewSpace(new_space_strings_[i]));
-    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_null_value());
+    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
   }
   for (int i = 0; i < old_space_strings_.length(); ++i) {
     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
-    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_null_value());
+    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
   }
 #endif
 }
diff --git a/src/heap.cc b/src/heap.cc
index bbb9d3e..ef1eb77 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -539,7 +539,7 @@
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject()) {
         // Check that the entry is the hole, undefined, or an actual symbol.
-        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
+        ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
       }
     }
   }
@@ -1871,7 +1871,7 @@
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_serialized_scope_info_map(Map::cast(obj));
+  set_scope_info_map(Map::cast(obj));
 
   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2629,12 +2629,10 @@
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
   STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-  Object* result;
-  { MaybeObject* maybe_result = Allocate(foreign_map(), space);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-
-  Foreign::cast(result)->set_address(address);
+  Foreign* result;
+  MaybeObject* maybe_result = Allocate(foreign_map(), space);
+  if (!maybe_result->To(&result)) return maybe_result;
+  result->set_foreign_address(address);
   return result;
 }
 
@@ -2648,7 +2646,7 @@
   share->set_name(name);
   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
   share->set_code(illegal);
-  share->set_scope_info(SerializedScopeInfo::Empty());
+  share->set_scope_info(ScopeInfo::Empty());
   Code* construct_stub =
       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
   share->set_construct_stub(construct_stub);
@@ -4396,10 +4394,10 @@
 
 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
                                         Context* previous,
-                                        SerializedScopeInfo* scope_info) {
+                                        ScopeInfo* scope_info) {
   Object* result;
   { MaybeObject* maybe_result =
-        AllocateFixedArrayWithHoles(scope_info->NumberOfContextSlots());
+        AllocateFixedArrayWithHoles(scope_info->ContextLength());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
@@ -4412,14 +4410,11 @@
 }
 
 
-MaybeObject* Heap::AllocateSerializedScopeInfo(int length) {
-  Object* result;
-  { MaybeObject* maybe_result = AllocateFixedArray(length, TENURED);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  SerializedScopeInfo* scope_info =
-      reinterpret_cast<SerializedScopeInfo*>(result);
-  scope_info->set_map(serialized_scope_info_map());
+MaybeObject* Heap::AllocateScopeInfo(int length) {
+  FixedArray* scope_info;
+  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
+  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
+  scope_info->set_map(scope_info_map());
   return scope_info;
 }
 
@@ -6342,7 +6337,9 @@
 void ExternalStringTable::CleanUp() {
   int last = 0;
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+    if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+      continue;
+    }
     if (heap_->InNewSpace(new_space_strings_[i])) {
       new_space_strings_[last++] = new_space_strings_[i];
     } else {
@@ -6352,7 +6349,9 @@
   new_space_strings_.Rewind(last);
   last = 0;
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+    if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+      continue;
+    }
     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
     old_space_strings_[last++] = old_space_strings_[i];
   }
diff --git a/src/heap.h b/src/heap.h
index 7c0b0ea..60ea337 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -73,7 +73,7 @@
   V(Map, global_context_map, GlobalContextMap)                                 \
   V(Map, fixed_array_map, FixedArrayMap)                                       \
   V(Map, code_map, CodeMap)                                                    \
-  V(Map, serialized_scope_info_map, SerializedScopeInfoMap)                    \
+  V(Map, scope_info_map, ScopeInfoMap)                                         \
   V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
   V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
   V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel)       \
@@ -547,7 +547,7 @@
   MUST_USE_RESULT MaybeObject* AllocateCodeCache();
 
-  // Allocates a serialized scope info.
-  MUST_USE_RESULT MaybeObject* AllocateSerializedScopeInfo(int length);
+  // Allocates a scope info.
+  MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
 
   // Allocates an empty PolymorphicCodeCache.
   MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
@@ -737,7 +737,7 @@
   // Allocate a block context.
   MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
                                                     Context* previous,
-                                                    SerializedScopeInfo* info);
+                                                    ScopeInfo* info);
 
   // Allocates a new utility object in the old generation.
   MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 6f46509..ae62367 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -126,7 +126,9 @@
   bool may_overflow = false;  // Overflow is ignored here.
   lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
   upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
+#ifdef DEBUG
   Verify();
+#endif
 }
 
 
@@ -173,7 +175,9 @@
   lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
   upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
   KeepOrder();
+#ifdef DEBUG
   Verify();
+#endif
   return may_overflow;
 }
 
@@ -183,7 +187,9 @@
   lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
   upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
   KeepOrder();
+#ifdef DEBUG
   Verify();
+#endif
   return may_overflow;
 }
 
@@ -197,9 +203,11 @@
 }
 
 
+#ifdef DEBUG
 void Range::Verify() const {
   ASSERT(lower_ <= upper_);
 }
+#endif
 
 
 bool Range::MulAndCheckOverflow(Range* other) {
@@ -210,7 +218,9 @@
   int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
   lower_ = Min(Min(v1, v2), Min(v3, v4));
   upper_ = Max(Max(v1, v2), Max(v3, v4));
+#ifdef DEBUG
   Verify();
+#endif
   return may_overflow;
 }
 
@@ -234,25 +244,6 @@
 }
 
 
-const char* HType::ToShortString() {
-  switch (type_) {
-    case kTagged: return "t";
-    case kTaggedPrimitive: return "p";
-    case kTaggedNumber: return "n";
-    case kSmi: return "m";
-    case kHeapNumber: return "h";
-    case kString: return "s";
-    case kBoolean: return "b";
-    case kNonPrimitive: return "r";
-    case kJSArray: return "a";
-    case kJSObject: return "o";
-    case kUninitialized: return "z";
-  }
-  UNREACHABLE();
-  return "Unreachable code";
-}
-
-
 HType HType::TypeFromValue(Handle<Object> value) {
   HType result = HType::Tagged();
   if (value->IsSmi()) {
@@ -564,7 +555,7 @@
   // followed by a simulate instruction, we need to insert after the
   // simulate instruction instead.
   HInstruction* next = previous->next_;
-  if (previous->HasSideEffects() && next != NULL) {
+  if (previous->HasObservableSideEffects() && next != NULL) {
     ASSERT(next->IsSimulate());
     previous = next;
     next = previous->next_;
@@ -604,7 +595,7 @@
 
   // Verify that instructions that may have side-effects are followed
   // by a simulate instruction.
-  if (HasSideEffects() && !IsOsrEntry()) {
+  if (HasObservableSideEffects() && !IsOsrEntry()) {
     ASSERT(next()->IsSimulate());
   }
 
@@ -1252,28 +1243,17 @@
 }
 
 
-Range* HBitAnd::InferRange() {
+Range* HBitwise::InferRange() {
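+  // BIT_XOR keeps the generic range; the mask-based bound below is computed
+  // only for BIT_AND and BIT_OR.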
+  if (op() == Token::BIT_XOR) return HValue::InferRange();
   int32_t left_mask = (left()->range() != NULL)
       ? left()->range()->Mask()
       : 0xffffffff;
   int32_t right_mask = (right()->range() != NULL)
       ? right()->range()->Mask()
       : 0xffffffff;
-  int32_t result_mask = left_mask & right_mask;
-  return (result_mask >= 0)
-      ? new Range(0, result_mask)
-      : HValue::InferRange();
-}
-
-
-Range* HBitOr::InferRange() {
-  int32_t left_mask = (left()->range() != NULL)
-      ? left()->range()->Mask()
-      : 0xffffffff;
-  int32_t right_mask = (right()->range() != NULL)
-      ? right()->range()->Mask()
-      : 0xffffffff;
-  int32_t result_mask = left_mask | right_mask;
+  int32_t result_mask = (op() == Token::BIT_AND)
+      ? left_mask & right_mask
+      : left_mask | right_mask;
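+  // If the sign bit cannot be set in the result, the result is non-negative
+  // and bounded above by result_mask; otherwise fall back to a generic range.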
   return (result_mask >= 0)
       ? new Range(0, result_mask)
       : HValue::InferRange();
@@ -1785,21 +1765,6 @@
 }
 
 
-HType HBitAnd::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
-HType HBitXor::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
-HType HBitOr::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
 HType HBitNot::CalculateInferredType() {
   return HType::TaggedNumber();
 }
@@ -1810,21 +1775,6 @@
 }
 
 
-HType HShl::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
-HType HShr::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
-HType HSar::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
 HType HStringCharFromCode::CalculateInferredType() {
   return HType::String();
 }
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 65fc4df..101d62a 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -67,10 +67,8 @@
   V(ArgumentsLength)                           \
   V(ArgumentsObject)                           \
   V(ArrayLiteral)                              \
-  V(BitAnd)                                    \
+  V(Bitwise)                                   \
   V(BitNot)                                    \
-  V(BitOr)                                     \
-  V(BitXor)                                    \
   V(BlockEntry)                                \
   V(BoundsCheck)                               \
   V(Branch)                                    \
@@ -183,6 +181,7 @@
   V(Calls)                                     \
   V(InobjectFields)                            \
   V(BackingStoreFields)                        \
+  V(ElementsKind)                              \
   V(ArrayElements)                             \
   V(DoubleArrayElements)                       \
   V(SpecializedArrayElements)                  \
@@ -246,7 +245,9 @@
     return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
   }
   void KeepOrder();
+#ifdef DEBUG
   void Verify() const;
+#endif
 
   void StackUpon(Range* other) {
     Intersect(other);
@@ -406,7 +407,6 @@
   static HType TypeFromValue(Handle<Object> value);
 
   const char* ToString();
-  const char* ToShortString();
 
  private:
   enum Type {
@@ -621,8 +621,14 @@
   void SetAllSideEffects() { flags_ |= AllSideEffects(); }
   void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
   bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
+  bool HasObservableSideEffects() const {
+    return (flags_ & ObservableSideEffects()) != 0;
+  }
 
   int ChangesFlags() const { return flags_ & ChangesFlagsMask(); }
+  int ObservableChangesFlags() const {
+    return flags_ & ChangesFlagsMask() & ObservableSideEffects();
+  }
 
   Range* range() const { return range_; }
   bool HasRange() const { return range_ != NULL; }
@@ -702,6 +708,12 @@
     return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
   }
 
+  // A flag mask of all side effects that can make observable changes in an
+  // executing program (i.e., those that are not safe to repeat, move or
+  // remove).
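+  // Elements-kind transitions only change an object's representation, not
+  // its JavaScript-visible behavior, so kChangesElementsKind is excluded.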
+  static int ObservableSideEffects() {
+    return ChangesFlagsMask() & ~(1 << kChangesElementsKind);
+  }
+
   // Remove the matching use from the use list if present.  Returns the
   // removed list node or NULL.
   HUseListNode* RemoveUse(HValue* value, int index);
@@ -1739,7 +1751,7 @@
   explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
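+    // The result can only be invalidated by an elements-kind transition,
+    // not by arbitrary map changes.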
+    SetFlag(kDependsOnElementsKind);
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -1866,6 +1878,7 @@
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetFlag(kDependsOnMaps);
+    SetFlag(kDependsOnElementsKind);
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -3028,52 +3041,30 @@
 };
 
 
-class HBitAnd: public HBitwiseBinaryOperation {
+class HBitwise: public HBitwiseBinaryOperation {
  public:
-  HBitAnd(HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right) { }
+  HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right), op_(op) {
+        ASSERT(op == Token::BIT_AND ||
+               op == Token::BIT_OR ||
+               op == Token::BIT_XOR);
+      }
+
+  Token::Value op() const { return op_; }
 
   virtual bool IsCommutative() const { return true; }
-  virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitAnd)
+  DECLARE_CONCRETE_INSTRUCTION(Bitwise)
 
  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other) {
+    return op() == HBitwise::cast(other)->op();
+  }
 
   virtual Range* InferRange();
-};
 
-
-class HBitXor: public HBitwiseBinaryOperation {
- public:
-  HBitXor(HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right) { }
-
-  virtual bool IsCommutative() const { return true; }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(BitXor)
-
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HBitOr: public HBitwiseBinaryOperation {
- public:
-  HBitOr(HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right) { }
-
-  virtual bool IsCommutative() const { return true; }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(BitOr)
-
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
-
-  virtual Range* InferRange();
+ private:
+  Token::Value op_;
 };
 
 
@@ -3083,7 +3074,6 @@
       : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual Range* InferRange();
-  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(Shl)
 
@@ -3098,7 +3088,6 @@
       : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual Range* InferRange();
-  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(Shr)
 
@@ -3113,7 +3102,6 @@
       : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual Range* InferRange();
-  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(Sar)
 
@@ -3915,7 +3903,7 @@
         transitioned_map_(transitioned_map) {
     SetOperandAt(0, object);
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
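+    // Changing the elements kind is a side effect, but not an observable
+    // one: GVN may still eliminate redundant transitions.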
+    SetFlag(kChangesElementsKind);
     set_representation(Representation::Tagged());
   }
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 3a4d172..fdd9dfb 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1346,6 +1346,7 @@
   explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
       : graph_(graph),
         info_(info),
+        removed_side_effects_(false),
         block_side_effects_(graph->blocks()->length()),
         loop_side_effects_(graph->blocks()->length()),
         visited_on_paths_(graph->zone(), graph->blocks()->length()) {
@@ -1357,7 +1358,8 @@
     ASSERT(!info_->isolate()->heap()->allow_allocation(true));
   }
 
-  void Analyze();
+  // Returns true if values with side effects are removed.
+  bool Analyze();
 
  private:
   int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
@@ -1377,6 +1379,7 @@
 
   HGraph* graph_;
   CompilationInfo* info_;
+  bool removed_side_effects_;
 
   // A map of block IDs to their side effects.
   ZoneList<int> block_side_effects_;
@@ -1390,13 +1393,14 @@
 };
 
 
-void HGlobalValueNumberer::Analyze() {
+bool HGlobalValueNumberer::Analyze() {
   ComputeBlockSideEffects();
   if (FLAG_loop_invariant_code_motion) {
     LoopInvariantCodeMotion();
   }
   HValueMap* map = new(zone()) HValueMap();
   AnalyzeBlock(graph_->entry_block(), map);
+  return removed_side_effects_;
 }
 
 
@@ -1530,11 +1534,12 @@
     HInstruction* next = instr->next();
     int flags = instr->ChangesFlags();
     if (flags != 0) {
-      ASSERT(!instr->CheckFlag(HValue::kUseGVN));
       // Clear all instructions in the map that are affected by side effects.
       map->Kill(flags);
       TraceGVN("Instruction %d kills\n", instr->id());
-    } else if (instr->CheckFlag(HValue::kUseGVN)) {
+    }
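+    // An instruction can both kill entries in the value map (above) and be
+    // value numbered itself, as long as its side effects are unobservable.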
+    if (instr->CheckFlag(HValue::kUseGVN)) {
+      ASSERT(!instr->HasObservableSideEffects());
       HValue* other = map->Lookup(instr);
       if (other != NULL) {
         ASSERT(instr->Equals(other) && other->Equals(instr));
@@ -1543,6 +1548,7 @@
                  instr->Mnemonic(),
                  other->id(),
                  other->Mnemonic());
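+        // Removing an instruction with (unobservable) side effects may
+        // expose further redundancies, so report it to the caller of
+        // Analyze().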
+        if (instr->HasSideEffects()) removed_side_effects_ = true;
         instr->DeleteAndReplaceWith(other);
       } else {
         map->Add(instr);
@@ -2108,12 +2114,12 @@
 void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
   ASSERT(!instr->IsControlInstruction());
   owner()->AddInstruction(instr);
-  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+  if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
 void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasSideEffects());
+  ASSERT(!instr->HasObservableSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
@@ -2131,12 +2137,12 @@
   }
   owner()->AddInstruction(instr);
   owner()->Push(instr);
-  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+  if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
 void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasSideEffects());
+  ASSERT(!instr->HasObservableSideEffects());
   if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
     return owner()->Bailout("bad value context for arguments object value");
   }
@@ -2161,7 +2167,7 @@
   builder->AddInstruction(instr);
   // We expect a simulate after every expression with side effects, though
   // this one isn't actually needed (and wouldn't work if it were targeted).
-  if (instr->HasSideEffects()) {
+  if (instr->HasObservableSideEffects()) {
     builder->Push(instr);
     builder->AddSimulate(ast_id);
     builder->Pop();
@@ -2171,7 +2177,7 @@
 
 
 void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasSideEffects());
+  ASSERT(!instr->HasObservableSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
@@ -2373,7 +2379,13 @@
   if (FLAG_use_gvn) {
     HPhase phase("Global value numbering", graph());
     HGlobalValueNumberer gvn(graph(), info());
-    gvn.Analyze();
+    bool removed_side_effects = gvn.Analyze();
+    // Trigger a second analysis pass to further eliminate duplicate values that
+    // could only be discovered by removing side-effect-generating instructions
+    // during the first pass.
+    if (FLAG_smi_only_arrays && removed_side_effects) {
+      gvn.Analyze();
+    }
   }
 
   if (FLAG_use_range) {
@@ -3546,7 +3558,7 @@
       // The HSimulate for the store should not see the stored value in
       // effect contexts (it is not materialized at expr->id() in the
       // unoptimized code).
-      if (instr->HasSideEffects()) {
+      if (instr->HasObservableSideEffects()) {
         if (ast_context()->IsEffect()) {
           AddSimulate(expr->id());
         } else {
@@ -3619,7 +3631,7 @@
   Push(value);
   instr->set_position(expr->position());
   AddInstruction(instr);
-  if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+  if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
   return ast_context()->ReturnValue(Pop());
 }
 
@@ -3640,7 +3652,7 @@
         new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
     instr->set_position(position);
     AddInstruction(instr);
-    if (instr->HasSideEffects()) AddSimulate(ast_id);
+    if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
   } else {
     HValue* context = environment()->LookupContext();
     HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -3653,8 +3665,8 @@
                                         function_strict_mode_flag());
     instr->set_position(position);
     AddInstruction(instr);
-    ASSERT(instr->HasSideEffects());
-    if (instr->HasSideEffects()) AddSimulate(ast_id);
+    ASSERT(instr->HasObservableSideEffects());
+    if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
   }
 }
 
@@ -3711,7 +3723,9 @@
         HStoreContextSlot* instr =
             new(zone()) HStoreContextSlot(context, var->index(), Top());
         AddInstruction(instr);
-        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(expr->AssignmentId());
+        }
         break;
       }
 
@@ -3737,7 +3751,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+      if (load->HasObservableSideEffects()) AddSimulate(expr->CompoundLoadId());
 
       CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
@@ -3745,14 +3759,14 @@
 
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
-      if (instr->HasSideEffects()) AddSimulate(operation->id());
+      if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
 
       HInstruction* store = BuildStoreNamed(obj, instr, prop);
       AddInstruction(store);
       // Drop the simulated receiver and value.  Return the value.
       Drop(2);
       Push(instr);
-      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
       return ast_context()->ReturnValue(Pop());
 
     } else {
@@ -3777,7 +3791,7 @@
 
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
-      if (instr->HasSideEffects()) AddSimulate(operation->id());
+      if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
 
       expr->RecordTypeFeedback(oracle());
       HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -3875,7 +3889,9 @@
         HStoreContextSlot* instr =
             new(zone()) HStoreContextSlot(context, var->index(), Top());
         AddInstruction(instr);
-        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(expr->AssignmentId());
+        }
         return ast_context()->ReturnValue(Pop());
       }
 
@@ -4148,7 +4164,7 @@
   if (num_untransitionable_maps == 1) {
     HInstruction* instr = AddInstruction(BuildMonomorphicElementAccess(
         object, key, val, untransitionable_map, is_store));
-    *has_side_effects |= instr->HasSideEffects();
+    *has_side_effects |= instr->HasObservableSideEffects();
     instr->set_position(position);
     return is_store ? NULL : instr;
   }
@@ -4235,7 +4251,7 @@
           Push(access);
         }
 
-        *has_side_effects |= access->HasSideEffects();
+        *has_side_effects |= access->HasObservableSideEffects();
         if (position != -1) {
           access->set_position(position);
         }
@@ -4256,7 +4272,7 @@
         access = AddInstruction(BuildExternalArrayElementAccess(
             external_elements, checked_key, val, elements_kind, is_store));
       }
-      *has_side_effects |= access->HasSideEffects();
+      *has_side_effects |= access->HasObservableSideEffects();
       access->set_position(position);
       if (!is_store) {
         Push(access);
@@ -4301,7 +4317,7 @@
   }
   instr->set_position(position);
   AddInstruction(instr);
-  *has_side_effects = instr->HasSideEffects();
+  *has_side_effects = instr->HasObservableSideEffects();
   return instr;
 }
 
@@ -4368,7 +4384,7 @@
   CHECK_ALIVE(VisitForValue(expr->obj()));
 
   HInstruction* instr = NULL;
-  if (expr->IsArrayLength()) {
+  if (expr->AsProperty()->IsArrayLength()) {
     HValue* array = Pop();
     AddInstruction(new(zone()) HCheckNonSmi(array));
     HInstruction* mapcheck =
@@ -4689,11 +4705,11 @@
       TraceInline(target, caller, "could not generate deoptimization info");
       return false;
     }
-    if (target_shared->scope_info() == SerializedScopeInfo::Empty()) {
+    if (target_shared->scope_info() == ScopeInfo::Empty()) {
       // The scope info might not have been set if a lazily compiled
       // function is inlined before being called for the first time.
-      Handle<SerializedScopeInfo> target_scope_info =
-          SerializedScopeInfo::Create(target_info.scope());
+      Handle<ScopeInfo> target_scope_info =
+          ScopeInfo::Create(target_info.scope());
       target_shared->set_scope_info(*target_scope_info);
     }
     target_shared->EnableDeoptimizationSupport(*target_info.code());
@@ -4900,7 +4916,7 @@
             AddInstruction(square_root);
             // MathPowHalf doesn't have side effects so there's no need for
             // an environment simulation here.
-            ASSERT(!square_root->HasSideEffects());
+            ASSERT(!square_root->HasObservableSideEffects());
             result = new(zone()) HDiv(context, double_one, square_root);
           } else if (exponent == 2.0) {
             result = new(zone()) HMul(context, left, left);
@@ -5033,7 +5049,7 @@
         return;
       }
 
-      if (CallStubCompiler::HasCustomCallGenerator(*expr->target()) ||
+      if (CallStubCompiler::HasCustomCallGenerator(expr->target()) ||
           expr->check_type() != RECEIVER_MAP_CHECK) {
         // When the target has a custom call IC generator, use the IC,
         // because it is likely to generate better code.  Also use the IC
@@ -5363,7 +5379,7 @@
                                 materialize_true));
 
   if (materialize_false->HasPredecessor()) {
-    materialize_false->SetJoinId(expr->expression()->id());
+    materialize_false->SetJoinId(expr->MaterializeFalseId());
     set_current_block(materialize_false);
     Push(graph()->GetConstantFalse());
   } else {
@@ -5371,7 +5387,7 @@
   }
 
   if (materialize_true->HasPredecessor()) {
-    materialize_true->SetJoinId(expr->expression()->id());
+    materialize_true->SetJoinId(expr->MaterializeTrueId());
     set_current_block(materialize_true);
     Push(graph()->GetConstantTrue());
   } else {
@@ -5484,7 +5500,9 @@
         HStoreContextSlot* instr =
             new(zone()) HStoreContextSlot(context, var->index(), after);
         AddInstruction(instr);
-        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(expr->AssignmentId());
+        }
         break;
       }
 
@@ -5513,7 +5531,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(expr->CountId());
+      if (load->HasObservableSideEffects()) AddSimulate(expr->CountId());
 
       after = BuildIncrement(returns_original_input, expr);
       input = Pop();
@@ -5526,7 +5544,7 @@
       // necessary.
       environment()->SetExpressionStackAt(0, after);
       if (returns_original_input) environment()->SetExpressionStackAt(1, input);
-      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
 
     } else {
       // Keyed property.
@@ -5619,13 +5637,9 @@
       instr = new(zone()) HDiv(context, left, right);
       break;
     case Token::BIT_XOR:
-      instr = new(zone()) HBitXor(context, left, right);
-      break;
     case Token::BIT_AND:
-      instr = new(zone()) HBitAnd(context, left, right);
-      break;
     case Token::BIT_OR:
-      instr = new(zone()) HBitOr(context, left, right);
+      instr = new(zone()) HBitwise(expr->op(), context, left, right);
       break;
     case Token::SAR:
       instr = new(zone()) HSar(context, left, right);
@@ -6080,7 +6094,7 @@
           HStoreContextSlot* store =
               new HStoreContextSlot(context, var->index(), value);
           AddInstruction(store);
-          if (store->HasSideEffects()) AddSimulate(proxy->id());
+          if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
         } else {
           environment()->Bind(var, value);
         }
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 66a9884..c6030f9 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -628,26 +628,6 @@
 }
 
 
-void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
-  ASSERT(CpuFeatures::IsEnabled(CMOV));
-  EnsureSpace ensure_space(this);
-  UNIMPLEMENTED();
-  USE(cc);
-  USE(dst);
-  USE(imm32);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
-  ASSERT(CpuFeatures::IsEnabled(CMOV));
-  EnsureSpace ensure_space(this);
-  UNIMPLEMENTED();
-  USE(cc);
-  USE(dst);
-  USE(handle);
-}
-
-
 void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 4dfde5f..fbd04bb 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -713,8 +713,6 @@
   void movzx_w(Register dst, const Operand& src);
 
   // Conditional moves
-  void cmov(Condition cc, Register dst, int32_t imm32);
-  void cmov(Condition cc, Register dst, Handle<Object> handle);
   void cmov(Condition cc, Register dst, Register src) {
     cmov(cc, dst, Operand(src));
   }
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 37b519a..50cddca 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -5189,7 +5189,8 @@
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
@@ -5274,7 +5275,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5301,7 +5303,8 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
@@ -6599,69 +6602,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register properties,
-    String* name,
-    Register r0) {
-  ASSERT(name->IsSymbol());
-
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = r0;
-    // Capacity is smi 2^n.
-    __ mov(index, FieldOperand(properties, kCapacityOffset));
-    __ dec(index);
-    __ and_(index,
-            Immediate(Smi::FromInt(name->Hash() +
-                                   StringDictionary::GetProbeOffset(i))));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
-    Register entity_name = r0;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
-                                kElementsStartOffset - kHeapObjectTag));
-    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
-    __ j(equal, done);
-
-    // Stop if found the property.
-    __ cmp(entity_name, Handle<String>(name));
-    __ j(equal, miss);
-
-    // Check if the entry name is not a symbol.
-    __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-              kIsSymbolMask);
-    __ j(zero, miss);
-  }
-
-  StringDictionaryLookupStub stub(properties,
-                                  r0,
-                                  r0,
-                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
-  __ push(Immediate(Handle<Object>(name)));
-  __ push(Immediate(name->Hash()));
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
-  __ test(r0, r0);
-  __ j(not_zero, miss);
-  __ jmp(done);
-  return result;
-}
-
-
 // Probe the string dictionary in the |elements| register. Jump to the
 // |done| label if a property with the given name is found, leaving the
 // index into the dictionary in |r0|. Jump to the |miss| label
@@ -6847,6 +6787,8 @@
   // ElementsTransitionGenerator::GenerateDoubleToObject
   { eax, edx, esi, EMIT_REMEMBERED_SET},
   { edx, eax, edi, EMIT_REMEMBERED_SET},
+  // StoreArrayLiteralElementStub::Generate
+  { ebx, eax, ecx, EMIT_REMEMBERED_SET},
   // Null termination.
   { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
 };
@@ -7089,6 +7031,93 @@
   // Fall through when we need to inform the incremental marker.
 }
 
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : element value to store
+  //  -- ebx    : array literal
+  //  -- edi    : map of array literal
+  //  -- ecx    : element index as smi
+  //  -- edx    : array literal index in function
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label slow_elements_from_double;
+  Label fast_elements;
+
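+  // With --trace-elements-transitions all stores take the runtime path
+  // below so that the transition can be traced.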
+  if (!FLAG_trace_elements_transitions) {
+    __ CheckFastElements(edi, &double_elements);
+
+    // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+    __ JumpIfSmi(eax, &smi_element);
+    __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
+
+    // Storing into the array literal requires an elements transition. Call
+    // into the runtime.
+  }
+
+  __ bind(&slow_elements);
+  __ pop(edi);  // Pop the return address and remember to put it back later
+                // for the tail call.
+  __ push(ebx);
+  __ push(ecx);
+  __ push(eax);
+  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+  __ push(edx);
+  __ push(edi);  // Push the return address back so that the tail call
+                 // returns to the right place.
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  if (!FLAG_trace_elements_transitions) {
+    // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+    __ bind(&double_elements);
+
+    __ push(edx);
+    __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
+    __ StoreNumberToDoubleElements(eax,
+                                   edx,
+                                   ecx,
+                                   edi,
+                                   xmm0,
+                                   &slow_elements_from_double,
+                                   false);
+    __ pop(edx);
+    __ jmp(&element_done);
+
+    __ bind(&slow_elements_from_double);
+    __ pop(edx);
+    __ jmp(&slow_elements);
+
+    // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+    __ bind(&fast_elements);
+    __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+    __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
+                             FixedArrayBase::kHeaderSize));
+    __ mov(Operand(ecx, 0), eax);
+    // Update the write barrier for the array store.
+    __ RecordWrite(ebx, ecx, eax,
+                   kDontSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   OMIT_SMI_CHECK);
+    __ jmp(&element_done);
+
+    // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+    // FAST_ELEMENTS, and value is Smi.
+    __ bind(&smi_element);
+    __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+    __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
+                        FixedArrayBase::kHeaderSize), eax);
+    // Fall through
+    __ bind(&element_done);
+    __ ret(0);
+  }
+}
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 8775344..692cbcf 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -428,16 +428,6 @@
                                      Handle<String> name,
                                      Register r0);
 
-  // TODO(kmillikin): Eliminate this function when the stub cache is fully
-  // handlified.
-  MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register properties,
-      String* name,
-      Register r0);
-
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
                                      Label* done,
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 3912727..cf52415 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -378,6 +378,12 @@
 
   // Found hole, store hole_nan_as_double instead.
   __ bind(&convert_hole);
+
+  if (FLAG_debug_code) {
+    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+    __ Assert(equal, "object found in smi-only array");
+  }
+
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope use_sse2(SSE2);
     __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 25588c8..2cbf518 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -44,11 +44,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -169,11 +164,6 @@
     }
   }
 
-  set_stack_height(2 + scope()->num_stack_slots());
-  if (FLAG_verify_stack_height) {
-    verify_stack_height();
-  }
-
   bool function_in_register = true;
 
   // Possibly allocate a local context.
@@ -372,15 +362,6 @@
 }
 
 
-void FullCodeGenerator::verify_stack_height() {
-  ASSERT(FLAG_verify_stack_height);
-  __ sub(ebp, Immediate(kPointerSize * stack_height()));
-  __ cmp(ebp, esp);
-  __ Assert(equal, "Full codegen stack height not as expected.");
-  __ add(ebp, Immediate(kPointerSize * stack_height()));
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
   ASSERT(var->IsStackAllocated() || var->IsContextSlot());
 }
@@ -397,14 +378,13 @@
   MemOperand operand = codegen()->VarOperand(var, result_register());
   // Memory operands can be pushed directly.
   __ push(operand);
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -451,12 +431,11 @@
   } else {
     __ push(Immediate(lit));
   }
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -489,7 +468,6 @@
                                                    Register reg) const {
   ASSERT(count > 0);
   __ Drop(count);
-  codegen()->decrement_stack_height(count);
 }
 
 
@@ -499,7 +477,6 @@
   ASSERT(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->decrement_stack_height(count);
 }
 
 
@@ -508,7 +485,6 @@
   ASSERT(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ mov(Operand(esp, 0), reg);
-  codegen()->decrement_stack_height(count - 1);
 }
 
 
@@ -518,9 +494,8 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
-  codegen()->decrement_stack_height(count);
 }
 
 
@@ -554,7 +529,6 @@
   __ bind(materialize_false);
   __ push(Immediate(isolate()->factory()->false_value()));
   __ bind(&done);
-  codegen()->increment_stack_height();
 }
 
 
@@ -582,12 +556,11 @@
       ? isolate()->factory()->true_value()
       : isolate()->factory()->false_value();
   __ push(Immediate(value));
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -680,7 +653,7 @@
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -691,13 +664,7 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ cmp(eax, isolate()->factory()->true_value());
     Split(equal, if_true, if_false, NULL);
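
The forward-bailout machinery goes away here: instead of replaying a stack of pending expressions, the split records exactly one TOS_REG bailout, keyed to the expression being tested. The signature change, restated from the hunks above for quick reference:

    // Before: only the machine state was passed; bailouts for enclosing
    // expressions were replayed from forward_bailout_stack_.
    void PrepareForBailoutBeforeSplit(State state, bool should_normalize,
                                      Label* if_true, Label* if_false);

    // After: the AST node itself is passed, and a single bailout with the
    // result in the top-of-stack register is recorded for its id.
    void PrepareForBailoutBeforeSplit(Expression* expr, bool should_normalize,
                                      Label* if_true, Label* if_false);

Every PrepareForBailoutBeforeSplit(TOS_REG, ...) caller below is updated accordingly to pass its expression.
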
@@ -714,8 +681,8 @@
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
-  bool binding_needs_init =
-      mode == CONST || mode == CONST_HARMONY || mode == LET;
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
       ++(*global_count);
@@ -784,18 +751,14 @@
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      increment_stack_height(3);
       if (function != NULL) {
         VisitForStackValue(function);
       } else if (binding_needs_init) {
         __ push(Immediate(isolate()->factory()->the_hole_value()));
-        increment_stack_height();
       } else {
         __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
-        increment_stack_height();
       }
       __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-      decrement_stack_height(4);
       break;
     }
   }
@@ -820,7 +783,6 @@
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
 
-  int switch_clause_stack_height = stack_height();
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -885,7 +847,6 @@
     __ jmp(default_clause->body_target());
   }
 
-  set_stack_height(switch_clause_stack_height);
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
@@ -927,7 +888,6 @@
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ bind(&done_convert);
   __ push(eax);
-  increment_stack_height();
 
   // Check for proxies.
   Label call_runtime;
@@ -1025,8 +985,6 @@
   __ push(eax);  // Fixed array length (as smi).
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.
 
-  // 1 ~ The object has already been pushed.
-  increment_stack_height(ForIn::kElementCount - 1);
   // Generate code for doing the condition check.
   __ bind(&loop);
   __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
@@ -1088,7 +1046,6 @@
   __ bind(loop_statement.break_label());
   __ add(esp, Immediate(5 * kPointerSize));
 
-  decrement_stack_height(ForIn::kElementCount);
   // Exit and decrement the loop depth.
   __ bind(&exit);
   decrement_loop_depth();
@@ -1277,27 +1234,59 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (!var->binding_needs_init()) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        Label done;
-        GetVar(eax, var);
-        __ cmp(eax, isolate()->factory()->the_hole_value());
-        __ j(not_equal, &done, Label::kNear);
-        if (var->mode() == LET || var->mode() == CONST_HARMONY) {
-          // Throw a reference error when using an uninitialized let/const
-          // binding in harmony mode.
-          __ push(Immediate(var->name()));
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-        } else {
-          // Uninitalized const bindings outside of harmony mode are unholed.
-          ASSERT(var->mode() == CONST);
-          __ mov(eax, isolate()->factory()->undefined_value());
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is
+        // physically located in the source after the variable's initializer.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        // Check that we always have valid source positions.
+        ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+        ASSERT(proxy->position() != RelocInfo::kNoPosition);
+        bool skip_init_check =
+            var->mode() != CONST &&
+            var->scope()->DeclarationScope() == scope()->DeclarationScope() &&
+            var->initializer_position() < proxy->position();
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          Label done;
+          GetVar(eax, var);
+          __ cmp(eax, isolate()->factory()->the_hole_value());
+          __ j(not_equal, &done, Label::kNear);
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            __ push(Immediate(var->name()));
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+            ASSERT(var->mode() == CONST);
+            __ mov(eax, isolate()->factory()->undefined_value());
+          }
+          __ bind(&done);
+          context()->Plug(eax);
+          break;
         }
-        __ bind(&done);
-        context()->Plug(eax);
       }
+      context()->Plug(var);
       break;
     }
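
The new condition is compact enough to distill. A hypothetical helper (CanElideHoleCheck is not a real V8 function, just a name for this sketch) stating the three requirements for skipping the read barrier:

    // Sketch only: all three must hold to skip the hole check.
    static bool CanElideHoleCheck(Variable* var, VariableProxy* proxy,
                                  Scope* compiling_scope) {
      return var->mode() != CONST &&  // non-harmony const may stay uninitialized
             var->scope()->DeclarationScope() ==
                 compiling_scope->DeclarationScope() &&  // same declaration scope
             var->initializer_position() < proxy->position();  // use after init
    }
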
 
@@ -1410,7 +1399,6 @@
     if (!result_saved) {
       __ push(eax);  // Save result on the stack
       result_saved = true;
-      increment_stack_height();
     }
     switch (property->kind()) {
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1435,7 +1423,6 @@
         // Fall through.
       case ObjectLiteral::Property::PROTOTYPE:
         __ push(Operand(esp, 0));  // Duplicate receiver.
-        increment_stack_height();
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1444,20 +1431,16 @@
         } else {
           __ Drop(3);
         }
-        decrement_stack_height(3);
         break;
       case ObjectLiteral::Property::SETTER:
       case ObjectLiteral::Property::GETTER:
         __ push(Operand(esp, 0));  // Duplicate receiver.
-        increment_stack_height();
         VisitForStackValue(key);
         __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
                           Smi::FromInt(1) :
                           Smi::FromInt(0)));
-        increment_stack_height();
         VisitForStackValue(value);
         __ CallRuntime(Runtime::kDefineAccessor, 4);
-        decrement_stack_height(4);
         break;
       default: UNREACHABLE();
     }
@@ -1532,67 +1515,16 @@
     if (!result_saved) {
       __ push(eax);
       result_saved = true;
-      increment_stack_height();
     }
     VisitForAccumulatorValue(subexpr);
 
     // Store the subexpression value in the array's elements.
     __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
     __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
-    __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-
-    Label element_done;
-    Label double_elements;
-    Label smi_element;
-    Label slow_elements;
-    Label fast_elements;
-    __ CheckFastElements(edi, &double_elements);
-
-    // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
-    __ JumpIfSmi(result_register(), &smi_element);
-    __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
-
-    // Store into the array literal requires a elements transition. Call into
-    // the runtime.
-    __ bind(&slow_elements);
-    __ push(Operand(esp, 0));  // Copy of array literal.
-    __ push(Immediate(Smi::FromInt(i)));
-    __ push(result_register());
-    __ push(Immediate(Smi::FromInt(NONE)));  // PropertyAttributes
-    __ push(Immediate(Smi::FromInt(strict_mode_flag())));  // Strict mode.
-    __ CallRuntime(Runtime::kSetProperty, 5);
-    __ jmp(&element_done);
-
-    // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
-    __ bind(&double_elements);
     __ mov(ecx, Immediate(Smi::FromInt(i)));
-    __ StoreNumberToDoubleElements(result_register(),
-                                   ebx,
-                                   ecx,
-                                   edx,
-                                   xmm0,
-                                   &slow_elements,
-                                   false);
-    __ jmp(&element_done);
-
-    // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
-    __ bind(&fast_elements);
-    __ mov(FieldOperand(ebx, offset), result_register());
-    // Update the write barrier for the array store.
-    __ RecordWriteField(ebx, offset, result_register(), ecx,
-                        kDontSaveFPRegs,
-                        EMIT_REMEMBERED_SET,
-                        OMIT_SMI_CHECK);
-    __ jmp(&element_done);
-
-    // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
-    // FAST_ELEMENTS, and value is Smi.
-    __ bind(&smi_element);
-    __ mov(FieldOperand(ebx, offset), result_register());
-    // Fall through
-
-    __ bind(&element_done);
+    __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
+    StoreArrayLiteralElementStub stub;
+    __ CallStub(&stub);
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
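
The open-coded fast/smi/double/slow element stores collapse into a single stub call. The apparent register contract at the call site, read off the hunk above: eax holds the value (the result register), ebx the array literal, edi its map, ecx the element index as a Smi, and edx the literal index as a Smi. Condensed:

    __ mov(ebx, Operand(esp, 0));                          // array literal
    __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));  // its map
    __ mov(ecx, Immediate(Smi::FromInt(i)));               // element index
    __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
    StoreArrayLiteralElementStub stub;                     // value is in eax
    __ CallStub(&stub);
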
@@ -1610,9 +1542,7 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // on the left-hand side.
   if (!expr->target()->IsValidLeftHandSide()) {
-    ASSERT(expr->target()->AsThrow() != NULL);
-    VisitInCurrentContext(expr->target());  // Throw does not plug the context
-    context()->Plug(eax);
+    VisitForEffect(expr->target());
     return;
   }
 
@@ -1637,7 +1567,6 @@
         // We need the receiver both on the stack and in the accumulator.
         VisitForAccumulatorValue(property->obj());
         __ push(result_register());
-        increment_stack_height();
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1648,7 +1577,6 @@
         VisitForAccumulatorValue(property->key());
         __ mov(edx, Operand(esp, 0));
         __ push(eax);
-        increment_stack_height();
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1680,7 +1608,6 @@
 
     Token::Value op = expr->binary_op();
     __ push(eax);  // Left operand goes on the stack.
-    increment_stack_height();
     VisitForAccumulatorValue(expr->value());
 
     OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1730,14 +1657,14 @@
   ASSERT(!key->handle()->IsSmi());
   __ mov(ecx, Immediate(key->handle()));
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1750,7 +1677,6 @@
   // stack. Right operand is in eax.
   Label smi_case, done, stub_call;
   __ pop(edx);
-  decrement_stack_height();
   __ mov(ecx, eax);
   __ or_(eax, edx);
   JumpPatchSite patch_site(masm_);
@@ -1842,7 +1768,6 @@
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ pop(edx);
-  decrement_stack_height();
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
   __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
@@ -1855,9 +1780,7 @@
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
-    ASSERT(expr->AsThrow() != NULL);
-    VisitInCurrentContext(expr);  // Throw does not plug the context
-    context()->Plug(eax);
+    VisitForEffect(expr);
     return;
   }
 
@@ -1881,11 +1804,9 @@
     }
     case NAMED_PROPERTY: {
       __ push(eax);  // Preserve value.
-      increment_stack_height();
       VisitForAccumulatorValue(prop->obj());
       __ mov(edx, eax);
       __ pop(eax);  // Restore value.
-      decrement_stack_height();
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
@@ -1895,14 +1816,11 @@
     }
     case KEYED_PROPERTY: {
       __ push(eax);  // Preserve value.
-      increment_stack_height();
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(ecx, eax);
       __ pop(edx);
-      decrement_stack_height();
       __ pop(eax);  // Restore value.
-      decrement_stack_height();
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
@@ -2029,7 +1947,6 @@
     __ mov(edx, Operand(esp, 0));
   } else {
     __ pop(edx);
-    decrement_stack_height();
   }
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
@@ -2043,7 +1960,6 @@
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
     __ Drop(1);
-    decrement_stack_height();
   }
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(eax);
@@ -2065,12 +1981,10 @@
   }
 
   __ pop(ecx);
-  decrement_stack_height();
   if (expr->ends_initialization_block()) {
     __ mov(edx, Operand(esp, 0));  // Leave receiver on the stack for later.
   } else {
     __ pop(edx);
-    decrement_stack_height();
   }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
@@ -2086,7 +2000,6 @@
     __ push(edx);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
-    decrement_stack_height();
   }
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2106,7 +2019,6 @@
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(edx);
-    decrement_stack_height();
     EmitKeyedPropertyLoad(expr);
     context()->Plug(eax);
   }
@@ -2133,7 +2045,6 @@
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
@@ -2148,7 +2059,6 @@
   __ pop(ecx);
   __ push(eax);
   __ push(ecx);
-  increment_stack_height();
 
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2167,7 +2077,6 @@
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);  // Drop the key still on the stack.
 }
 
@@ -2211,14 +2120,11 @@
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(esp, arg_count * kPointerSize));
@@ -2235,9 +2141,7 @@
       FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
   __ push(Immediate(Smi::FromInt(strict_mode)));
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 }
 
 
@@ -2263,33 +2167,15 @@
       VisitForStackValue(callee);
       // Reserved receiver slot.
       __ push(Immediate(isolate()->factory()->undefined_value()));
-      increment_stack_height();
       // Push the arguments.
       for (int i = 0; i < arg_count; i++) {
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly in
-      // generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(eax);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in eax (function) and
       // edx (receiver). Touch up the stack with the right values.
@@ -2303,13 +2189,11 @@
     RecordJSReturnSite(expr);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    decrement_stack_height(arg_count + 1);  // Function is left on the stack.
     context()->DropAndPlug(1, eax);
 
   } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Push global object as receiver for the call IC.
     __ push(GlobalObjectOperand());
-    increment_stack_height();
     EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -2328,7 +2212,6 @@
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ push(eax);  // Function.
     __ push(edx);  // Receiver.
-    increment_stack_height(2);
 
     // If fast case code has been generated, emit code to push the function
     // and receiver and have the slow path jump around this code.
@@ -2336,8 +2219,7 @@
       Label call;
       __ jmp(&call, Label::kNear);
       __ bind(&done);
-      // Push function.  Stack height already incremented in slow case
-      // above.
+      // Push function.
       __ push(eax);
       // The receiver is implicitly the global receiver. Indicate this by
       // passing the hole to the call function stub.
@@ -2370,7 +2252,6 @@
     // Load global receiver object.
     __ mov(ebx, GlobalObjectOperand());
     __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-    increment_stack_height();
     // Emit function call.
     EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
@@ -2411,13 +2292,12 @@
   Handle<Code> construct_builtin =
       isolate()->builtins()->JSConstructCall();
   __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
-  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
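
EmitIsSmi is the first of roughly two dozen inline-runtime emitters in this file converted from taking the raw argument list to taking the CallRuntime node; the remaining conversions below all repeat the same shape. A sketch of the pattern (EmitFoo is a placeholder name, not a real emitter):

    // Old: void FullCodeGenerator::EmitFoo(ZoneList<Expression*>* args);
    void FullCodeGenerator::EmitFoo(CallRuntime* expr) {
      ZoneList<Expression*>* args = expr->arguments();
      // ... emit the test as before; the payoff is that bailout points can
      // now be keyed to the expression itself:
      //   PrepareForBailoutBeforeSplit(expr, ...);
    }
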
@@ -2429,7 +2309,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2437,7 +2317,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2449,7 +2330,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask | 0x80000000));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2457,7 +2338,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2481,14 +2363,15 @@
   __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   __ j(below, if_false);
   __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2502,14 +2385,15 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2525,7 +2409,7 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2533,7 +2417,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2610,12 +2495,13 @@
          Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2629,14 +2515,15 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2650,14 +2537,15 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2671,7 +2559,7 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2679,8 +2567,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2703,14 +2591,15 @@
   __ bind(&check_frame_marker);
   __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2725,16 +2614,16 @@
                          &if_true, &if_false, &fall_through);
 
   __ pop(ebx);
-  decrement_stack_height();
   __ cmp(eax, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
@@ -2748,8 +2637,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -2771,7 +2660,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2831,7 +2721,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2839,12 +2729,12 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
     VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
-    decrement_stack_height(2);
   }
   // Finally, we're expected to leave a value on the top of the stack.
   __ mov(eax, isolate()->factory()->undefined_value());
@@ -2852,8 +2742,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
@@ -2903,34 +2793,35 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
-  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   VisitForStackValue(args->at(3));
   __ CallStub(&stub);
-  decrement_stack_height(4);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2948,8 +2839,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2960,18 +2852,17 @@
   } else {
     __ CallRuntime(Runtime::kMath_pow, 2);
   }
-  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(ebx);  // eax = value. ebx = object.
-  decrement_stack_height();
 
   Label done;
   // If the object is a smi, return the value.
@@ -2994,7 +2885,8 @@
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -3002,12 +2894,12 @@
 
   NumberToStringStub stub;
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3025,7 +2917,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -3037,7 +2930,6 @@
   Register result = edx;
 
   __ pop(object);
-  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3073,7 +2965,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -3086,7 +2979,6 @@
   Register result = eax;
 
   __ pop(object);
-  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3123,7 +3015,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3131,12 +3024,12 @@
 
   StringAddStub stub(NO_STRING_ADD_FLAGS);
   __ CallStub(&stub);
-  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3144,58 +3037,58 @@
 
   StringCompareStub stub;
   __ CallStub(&stub);
-  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3210,25 +3103,25 @@
   __ InvokeFunction(edi, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
-  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3316,12 +3209,12 @@
   __ CallRuntime(Runtime::kSwapElements, 3);
 
   __ bind(&done);
-  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3369,7 +3262,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = eax;
@@ -3402,12 +3296,12 @@
   __ mov(eax, Immediate(isolate()->factory()->true_value()));
   __ bind(&done);
 
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3425,14 +3319,15 @@
 
   __ test(FieldOperand(eax, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3447,11 +3342,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
 
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
@@ -3706,7 +3602,6 @@
   __ add(esp, Immediate(3 * kPointerSize));
 
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3726,7 +3621,6 @@
     // Prepare for calling JS runtime function.
     __ mov(eax, GlobalObjectOperand());
     __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
-    increment_stack_height();
   }
 
   // Push the arguments ("left-to-right").
@@ -3748,11 +3642,6 @@
     // Call the C runtime function.
     __ CallRuntime(expr->function(), arg_count);
   }
-  decrement_stack_height(arg_count);
-  if (expr->is_jsruntime()) {
-    decrement_stack_height();
-  }
-
   context()->Plug(eax);
 }
 
@@ -3769,7 +3658,6 @@
         VisitForStackValue(property->key());
         __ push(Immediate(Smi::FromInt(strict_mode_flag())));
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-        decrement_stack_height(2);
         context()->Plug(eax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
@@ -3817,18 +3705,41 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ mov(eax, isolate()->factory()->true_value());
+        } else {
+          __ push(isolate()->factory()->true_value());
+        }
+        __ jmp(&done, Label::kNear);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ mov(eax, isolate()->factory()->false_value());
+        } else {
+          __ push(isolate()->factory()->false_value());
+        }
+        __ bind(&done);
       }
       break;
     }
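
For ! in a value context the code no longer goes through PrepareTest: it branches on the subexpression with the labels swapped and materializes the boolean at two join points, each carrying its own administrative AST id (MaterializeTrueId / MaterializeFalseId) so the optimizing compiler has an environment to resume into at either continuation. The control shape, restated from the hunk above:

    VisitForControl(expr->expression(),
                    &materialize_false,  // subexpression true  => !e is false
                    &materialize_true,   // subexpression false => !e is true
                    &materialize_true);  // fall-through
    // materialize_true:  bailout at MaterializeTrueId,  produce true_value
    // materialize_false: bailout at MaterializeFalseId, produce false_value
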
@@ -3839,7 +3750,6 @@
         VisitForTypeofValue(expr->expression());
       }
       __ CallRuntime(Runtime::kTypeof, 1);
-      decrement_stack_height();
       context()->Plug(eax);
       break;
     }
@@ -3893,10 +3803,7 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // as the left-hand side.
   if (!expr->expression()->IsValidLeftHandSide()) {
-    ASSERT(expr->expression()->AsThrow() != NULL);
-    VisitInCurrentContext(expr->expression());
-    // Visiting Throw does not plug the context.
-    context()->Plug(eax);
+    VisitForEffect(expr->expression());
     return;
   }
 
@@ -3921,20 +3828,17 @@
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
       __ push(Immediate(Smi::FromInt(0)));
-      increment_stack_height();
     }
     if (assign_type == NAMED_PROPERTY) {
       // Put the object both on the stack and in the accumulator.
       VisitForAccumulatorValue(prop->obj());
       __ push(eax);
-      increment_stack_height();
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(edx, Operand(esp, 0));
       __ push(eax);
-      increment_stack_height();
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -3965,7 +3869,6 @@
       switch (assign_type) {
         case VARIABLE:
           __ push(eax);
-          increment_stack_height();
           break;
         case NAMED_PROPERTY:
           __ mov(Operand(esp, kPointerSize), eax);
@@ -4039,7 +3942,6 @@
     case NAMED_PROPERTY: {
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       __ pop(edx);
-      decrement_stack_height();
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
@@ -4057,8 +3959,6 @@
     case KEYED_PROPERTY: {
       __ pop(ecx);
       __ pop(edx);
-      decrement_stack_height();
-      decrement_stack_height();
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
@@ -4110,12 +4010,13 @@
     context()->Plug(eax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Expression* sub_expr,
                                                  Handle<String> check) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -4125,9 +4026,9 @@
                          &if_true, &if_false, &fall_through);
 
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(eax, if_true);
@@ -4211,8 +4112,7 @@
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      decrement_stack_height(2);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
       break;
@@ -4221,8 +4121,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      decrement_stack_height(2);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ test(eax, eax);
       // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
@@ -4255,7 +4154,6 @@
           UNREACHABLE();
       }
       __ pop(edx);
-      decrement_stack_height();
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4275,7 +4173,7 @@
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
 
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ test(eax, eax);
       Split(cc, if_true, if_false, fall_through);
     }
@@ -4298,7 +4196,7 @@
                          &if_true, &if_false, &fall_through);
 
   VisitForAccumulatorValue(sub_expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Handle<Object> nil_value = nil == kNullValue ?
       isolate()->factory()->null_value() :
       isolate()->factory()->undefined_value();
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 626f899..227d0b5 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -110,22 +110,17 @@
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < inputs_.length(); i++) {
+  for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    inputs_[i]->PrintTo(stream);
+    InputAt(i)->PrintTo(stream);
   }
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  for (int i = 0; i < results_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    results_[i]->PrintTo(stream);
-  }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
 }
 
 
@@ -459,7 +454,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new LInstructionGap(block);
+  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -534,7 +529,7 @@
 
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
-  chunk_ = new LChunk(info(), graph());
+  chunk_ = new(zone()) LChunk(info(), graph());
   HPhase phase("Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -570,14 +565,14 @@
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new LUnallocated(LUnallocated::FIXED_REGISTER,
-                          Register::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg));
 }
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
-  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                          XMMRegister::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                  XMMRegister::ToAllocationIndex(reg));
 }
 
 
@@ -592,30 +587,30 @@
 
 
 LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
   return Use(value,
-             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                              LUnallocated::USED_AT_START));
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
 }
 
 
 LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
 }
 
 
 LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE,
-                                     LUnallocated::USED_AT_START));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                             LUnallocated::USED_AT_START));
 }
 
 
@@ -650,7 +645,7 @@
 LOperand* LChunkBuilder::UseAny(HValue* value) {
   return value->IsConstant()
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      :  Use(value, new LUnallocated(LUnallocated::ANY));
+      :  Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
 }
 
 
@@ -675,14 +670,15 @@
 
 template<int I, int T>
 LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::NONE));
+  return Define(instr, new(zone()) LUnallocated(LUnallocated::NONE));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsRegister(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
@@ -690,14 +686,16 @@
 LInstruction* LChunkBuilder::DefineAsSpilled(
     LTemplateInstruction<1, I, T>* instr,
     int index) {
-  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineSameAsFirst(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }
 
 
@@ -750,7 +748,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasSideEffects()) {
+  if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -762,7 +760,8 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
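
MarkAsCall now keys its deoptimization plumbing on observable side effects rather than any side effects: only a call whose effects JavaScript can observe defers its lazy-deopt environment to the following HSimulate, and conversely a call without observable effects keeps an environment attached directly. The predicate, restated from the two hunks above:

    bool needs_environment =
        (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
        !hinstr->HasObservableSideEffects();
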
@@ -779,13 +778,14 @@
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new LPointerMap(position_));
+  instr->set_pointer_map(new(zone()) LPointerMap(position_));
   return instr;
 }
 
 
 LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
   allocator_->RecordTemporary(operand);
   return operand;
 }
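
The recurring new T(...) to new(zone()) T(...) change moves every Lithium object into the compilation zone. A minimal sketch of the idiom, assuming the ZoneObject base that Lithium classes inherit from (see zone.h for the real definition):

    // Sketch: the placement operator new bump-allocates from the zone.
    // There is no matching delete; the whole zone is released at once
    // when compilation finishes.
    class ZoneObject {
     public:
      void* operator new(size_t size, Zone* zone) {
        return zone->New(static_cast<int>(size));
      }
    };

    LGoto* g = new(zone()) LGoto(block_id);  // lives as long as the zone
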
@@ -806,40 +806,17 @@
 
 
 LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new LLabel(instr->block());
+  return new(zone()) LLabel(instr->block());
 }
 
 
 LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
-                                   HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineSameAsFirst(new LBitI(op, left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* context = UseFixed(instr->context(), esi);
-    LOperand* left = UseFixed(instr->left(), edx);
-    LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result = new LArithmeticT(op, context, left, right);
-    return MarkAsCall(DefineFixed(result, eax), instr);
-  }
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
@@ -852,7 +829,7 @@
     LOperand* context = UseFixed(instr->context(), esi);
     LOperand* left = UseFixed(instr->left(), edx);
     LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result = new LArithmeticT(op, context, left, right);
+    LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
     return MarkAsCall(DefineFixed(result, eax), instr);
   }
 
@@ -886,7 +863,7 @@
   }
 
   LInstruction* result =
-      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+      DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
   return does_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -899,7 +876,7 @@
   ASSERT(op != Token::MOD);
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new LArithmeticD(op, left, right);
+  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
   return DefineSameAsFirst(result);
 }
 
@@ -919,7 +896,7 @@
   LOperand* left_operand = UseFixed(left, edx);
   LOperand* right_operand = UseFixed(right, eax);
   LArithmeticT* result =
-      new LArithmeticT(op, context, left_operand, right_operand);
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1017,12 +994,13 @@
   int ast_id = hydrogen_env->ast_id();
   ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
-  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
-                                          ast_id,
-                                          hydrogen_env->parameter_count(),
-                                          argument_count_,
-                                          value_count,
-                                          outer);
+  LEnvironment* result =
+      new(zone()) LEnvironment(hydrogen_env->closure(),
+                               ast_id,
+                               hydrogen_env->parameter_count(),
+                               argument_count_,
+                               value_count,
+                               outer);
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1031,7 +1009,7 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument((*argument_index_accumulator)++);
+      op = new(zone()) LArgument((*argument_index_accumulator)++);
     } else {
       op = UseAny(value);
     }
@@ -1043,7 +1021,7 @@
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new LGoto(instr->FirstSuccessor()->block_id());
+  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
@@ -1055,7 +1033,7 @@
     HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
         ? instr->FirstSuccessor()
         : instr->SecondSuccessor();
-    return new LGoto(successor->block_id());
+    return new(zone()) LGoto(successor->block_id());
   }
   ToBooleanStub::Types expected = instr->expected_input_types();
   // We need a temporary register when we have to access the map *or* we have
@@ -1063,24 +1041,24 @@
   // involving maps).
   bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
   LOperand* temp = needs_temp ? TempRegister() : NULL;
-  return AssignEnvironment(new LBranch(UseRegister(v), temp));
+  return AssignEnvironment(new(zone()) LBranch(UseRegister(v), temp));
 }
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
-  return new LCmpMapAndBranch(value);
+  return new(zone()) LCmpMapAndBranch(value);
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  return DefineAsRegister(new LArgumentsElements);
+  return DefineAsRegister(new(zone()) LArgumentsElements);
 }
 
 
@@ -1088,7 +1066,7 @@
   LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
   LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
   LOperand* context = UseFixed(instr->context(), esi);
-  LInstanceOf* result = new LInstanceOf(context, left, right);
+  LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1096,7 +1074,7 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new LInstanceOfKnownGlobal(
+      new(zone()) LInstanceOfKnownGlobal(
           UseFixed(instr->context(), esi),
           UseFixed(instr->left(), InstanceofStub::left()),
           FixedTemp(edi));
@@ -1110,11 +1088,11 @@
   LOperand* length = UseFixed(instr->length(), ebx);
   LOperand* elements = UseFixed(instr->elements(), ecx);
   LOperand* temp = FixedTemp(edx);
-  LApplyArguments* result = new LApplyArguments(function,
-                                                receiver,
-                                                length,
-                                                elements,
-                                                temp);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements,
+                                                        temp);
   return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1122,42 +1100,44 @@
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
   LOperand* argument = UseAny(instr->argument());
-  return new LPushArgument(argument);
+  return new(zone()) LPushArgument(argument);
 }
 
 
 LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
 }
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
 }
 
 
 LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LOuterContext(context));
+  return DefineAsRegister(new(zone()) LOuterContext(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LGlobalObject(context));
+  return DefineAsRegister(new(zone()) LGlobalObject(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
   LOperand* global_object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LGlobalReceiver(global_object));
+  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
 }
 
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
 }
 
 
@@ -1165,7 +1145,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* function = UseFixed(instr->function(), edi);
   argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new LInvokeFunction(context, function);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
   return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1177,17 +1157,20 @@
     ASSERT(instr->value()->representation().IsDouble());
     LOperand* context = UseAny(instr->context());  // Not actually used.
     LOperand* input = UseRegisterAtStart(instr->value());
-    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+                                                                  input);
     return DefineSameAsFirst(result);
   } else if (op == kMathSin || op == kMathCos) {
     LOperand* context = UseFixed(instr->context(), esi);
     LOperand* input = UseFixedDouble(instr->value(), xmm1);
-    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+                                                                  input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* context = UseAny(instr->context());  // Deferred use by MathAbs.
-    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+                                                                  input);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1212,7 +1195,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* key = UseFixed(instr->key(), ecx);
   argument_count_ -= instr->argument_count();
-  LCallKeyed* result = new LCallKeyed(context, key);
+  LCallKeyed* result = new(zone()) LCallKeyed(context, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1220,7 +1203,7 @@
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallNamed* result = new LCallNamed(context);
+  LCallNamed* result = new(zone()) LCallNamed(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1228,14 +1211,14 @@
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallGlobal* result = new LCallGlobal(context);
+  LCallGlobal* result = new(zone()) LCallGlobal(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
 }
 
 
@@ -1243,7 +1226,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* constructor = UseFixed(instr->constructor(), edi);
   argument_count_ -= instr->argument_count();
-  LCallNew* result = new LCallNew(context, constructor);
+  LCallNew* result = new(zone()) LCallNew(context, constructor);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1251,7 +1234,7 @@
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallFunction* result = new LCallFunction(context);
+  LCallFunction* result = new(zone()) LCallFunction(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1259,7 +1242,7 @@
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   argument_count_ -= instr->argument_count();
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LCallRuntime(context), eax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
 }
 
 
@@ -1278,8 +1261,26 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new(zone()) LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* context = UseFixed(instr->context(), esi);
+    LOperand* left = UseFixed(instr->left(), edx);
+    LOperand* right = UseFixed(instr->right(), eax);
+    LArithmeticT* result =
+        new(zone()) LArithmeticT(instr->op(), context, left, right);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
 }
 
 
@@ -1287,21 +1288,11 @@
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
   LOperand* input = UseRegisterAtStart(instr->value());
-  LBitNotI* result = new LBitNotI(input);
+  LBitNotI* result = new(zone()) LBitNotI(input);
   return DefineSameAsFirst(result);
 }
 
 
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
@@ -1311,7 +1302,7 @@
     LOperand* temp = FixedTemp(edx);
     LOperand* dividend = UseFixed(instr->left(), eax);
     LOperand* divisor = UseRegister(instr->right());
-    LDivI* result = new LDivI(dividend, divisor, temp);
+    LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
     return AssignEnvironment(DefineFixed(result, eax));
   } else {
     ASSERT(instr->representation().IsTagged());
@@ -1329,7 +1320,8 @@
     if (instr->HasPowerOf2Divisor()) {
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
-      LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+      LModI* mod =
+          new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
       result = DefineSameAsFirst(mod);
     } else {
       // The temporary operand is necessary to ensure that right is
@@ -1337,7 +1329,7 @@
       LOperand* temp = FixedTemp(edx);
       LOperand* value = UseFixed(instr->left(), eax);
       LOperand* divisor = UseRegister(instr->right());
-      LModI* mod = new LModI(value, divisor, temp);
+      LModI* mod = new(zone()) LModI(value, divisor, temp);
       result = DefineFixed(mod, edx);
     }
 
@@ -1354,7 +1346,7 @@
     // TODO(fschneider): Allow any register as input registers.
     LOperand* left = UseFixedDouble(instr->left(), xmm2);
     LOperand* right = UseFixedDouble(instr->right(), xmm1);
-    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   }
 }
@@ -1370,7 +1362,7 @@
     if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
       temp = TempRegister();
     }
-    LMulI* mul = new LMulI(left, right, temp);
+    LMulI* mul = new(zone()) LMulI(left, right, temp);
     return AssignEnvironment(DefineSameAsFirst(mul));
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MUL, instr);
@@ -1387,7 +1379,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new LSubI(left, right);
+    LSubI* sub = new(zone()) LSubI(left, right);
     LInstruction* result = DefineSameAsFirst(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1408,7 +1400,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    LAddI* add = new LAddI(left, right);
+    LAddI* add = new(zone()) LAddI(left, right);
     LInstruction* result = DefineSameAsFirst(add);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1433,7 +1425,7 @@
   LOperand* right = exponent_type.IsDouble() ?
       UseFixedDouble(instr->right(), xmm2) :
       UseFixed(instr->right(), eax);
-  LPower* result = new LPower(left, right);
+  LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
@@ -1445,7 +1437,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseFixed(instr->left(), edx);
   LOperand* right = UseFixed(instr->right(), eax);
-  LCmpT* result = new LCmpT(context, left, right);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1458,7 +1450,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return new LCmpIDAndBranch(left, right);
+    return new(zone()) LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
@@ -1472,7 +1464,7 @@
       left = UseRegisterAtStart(instr->left());
       right = UseRegisterAtStart(instr->right());
     }
-    return new LCmpIDAndBranch(left, right);
+    return new(zone()) LCmpIDAndBranch(left, right);
   }
 }
 
@@ -1481,49 +1473,51 @@
     HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseAtStart(instr->right());
-  return new LCmpObjectEqAndBranch(left, right);
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
   HCompareConstantEqAndBranch* instr) {
-  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+  return new(zone()) LCmpConstantEqAndBranch(
+      UseRegisterAtStart(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   // We only need a temp register for non-strict compare.
   LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
-  return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
+  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
-  return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
+  return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsSmiAndBranch(Use(instr->value()));
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
-                                      TempRegister());
+  return new(zone()) LIsUndetectableAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
-                                       TempRegister());
+  return new(zone()) LHasInstanceTypeAndBranch(
+      UseRegisterAtStart(instr->value()),
+      TempRegister());
 }
 
 
@@ -1532,14 +1526,14 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new LGetCachedArrayIndex(value));
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
 }
 
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasCachedArrayIndexAndBranch(
+  return new(zone()) LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
 
@@ -1547,7 +1541,7 @@
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+  return new(zone()) LClassOfTestAndBranch(UseTempRegister(instr->value()),
                                    TempRegister(),
                                    TempRegister());
 }
@@ -1555,32 +1549,32 @@
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LJSArrayLength(array));
+  return DefineAsRegister(new(zone()) LJSArrayLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LFixedArrayBaseLength(array));
+  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
   LOperand* object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LElementsKind(object));
+  return DefineAsRegister(new(zone()) LElementsKind(object));
 }
 
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
-  LValueOf* result = new LValueOf(object, TempRegister());
+  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
   return AssignEnvironment(DefineSameAsFirst(result));
 }
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  return AssignEnvironment(new LBoundsCheck(
+  return AssignEnvironment(new(zone()) LBoundsCheck(
       UseRegisterOrConstantAtStart(instr->index()),
       UseAtStart(instr->length())));
 }
@@ -1596,7 +1590,7 @@
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseFixed(instr->value(), eax);
-  return MarkAsCall(new LThrow(context, value), instr);
+  return MarkAsCall(new(zone()) LThrow(context, value), instr);
 }
 
 
@@ -1619,7 +1613,7 @@
   if (from.IsTagged()) {
     if (to.IsDouble()) {
       LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new LNumberUntagD(value);
+      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
@@ -1631,10 +1625,10 @@
             (truncating && CpuFeatures::IsSupported(SSE3))
             ? NULL
             : FixedTemp(xmm1);
-        LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+        LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
-        return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+        return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
       }
     }
   } else if (from.IsDouble()) {
@@ -1644,7 +1638,7 @@
 
       // Make sure that temp and result_temp are different registers.
       LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new LNumberTagD(value, temp);
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
       return AssignPointerMap(Define(result, result_temp));
     } else {
       ASSERT(to.IsInteger32());
@@ -1653,21 +1647,23 @@
       LOperand* value = needs_temp ?
           UseTempRegister(instr->value()) : UseRegister(instr->value());
       LOperand* temp = needs_temp ? TempRegister() : NULL;
-      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
     }
   } else if (from.IsInteger32()) {
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       if (val->HasRange() && val->range()->IsInSmiRange()) {
-        return DefineSameAsFirst(new LSmiTag(value));
+        return DefineSameAsFirst(new(zone()) LSmiTag(value));
       } else {
-        LNumberTagI* result = new LNumberTagI(value);
+        LNumberTagI* result = new(zone()) LNumberTagI(value);
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
       }
     } else {
       ASSERT(to.IsDouble());
-      return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+      return DefineAsRegister(
+          new(zone()) LInteger32ToDouble(Use(instr->value())));
     }
   }
   UNREACHABLE();
@@ -1677,28 +1673,28 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new LCheckNonSmi(value));
+  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
-  LCheckInstanceType* result = new LCheckInstanceType(value, temp);
+  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp = TempRegister();
-  LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+  LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value));
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
 }
 
 
@@ -1710,13 +1706,13 @@
   LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
       ? UseRegisterAtStart(instr->value())
       : UseAtStart(instr->value());
-  return AssignEnvironment(new LCheckFunction(value));
+  return AssignEnvironment(new(zone()) LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckMap* result = new LCheckMap(value);
+  LCheckMap* result = new(zone()) LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -1726,17 +1722,17 @@
   Representation input_rep = value->representation();
   if (input_rep.IsDouble()) {
     LOperand* reg = UseRegister(value);
-    return DefineAsRegister(new LClampDToUint8(reg));
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg));
   } else if (input_rep.IsInteger32()) {
     LOperand* reg = UseFixed(value, eax);
-    return DefineFixed(new LClampIToUint8(reg), eax);
+    return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
   } else {
     ASSERT(input_rep.IsTagged());
     LOperand* reg = UseFixed(value, eax);
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve xmm1 explicitly.
     LOperand* temp = FixedTemp(xmm1);
-    LClampTToUint8* result = new LClampTToUint8(reg, temp);
+    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
     return AssignEnvironment(DefineFixed(result, eax));
   }
 }
@@ -1751,7 +1747,7 @@
     LOperand* reg = UseRegister(value);
     LOperand* temp_reg =
         CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
-    result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
+    result = DefineAsRegister(new(zone()) LDoubleToI(reg, temp_reg));
   } else if (input_rep.IsInteger32()) {
     // Canonicalization should already have removed the hydrogen instruction in
     // this case, since it is a noop.
@@ -1764,29 +1760,29 @@
     // temps. Reserve xmm1 explicitly.
     LOperand* xmm_temp =
         CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
-    result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
+    result = DefineSameAsFirst(new(zone()) LTaggedToI(reg, xmm_temp));
   }
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new LReturn(UseFixed(instr->value(), eax));
+  return new(zone()) LReturn(UseFixed(instr->value(), eax));
 }
 
 
 LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
   Representation r = instr->representation();
   if (r.IsInteger32()) {
-    return DefineAsRegister(new LConstantI);
+    return DefineAsRegister(new(zone()) LConstantI);
   } else if (r.IsDouble()) {
     double value = instr->DoubleValue();
     LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
         ? TempRegister()
         : NULL;
-    return DefineAsRegister(new LConstantD(temp));
+    return DefineAsRegister(new(zone()) LConstantD(temp));
   } else if (r.IsTagged()) {
-    return DefineAsRegister(new LConstantT);
+    return DefineAsRegister(new(zone()) LConstantT);
   } else {
     UNREACHABLE();
     return NULL;
@@ -1795,7 +1791,7 @@
 
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new LLoadGlobalCell;
+  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
   return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
@@ -1805,14 +1801,15 @@
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* global_object = UseFixed(instr->global_object(), eax);
-  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
   LStoreGlobalCell* result =
-      new LStoreGlobalCell(UseTempRegister(instr->value()),
+      new(zone()) LStoreGlobalCell(UseTempRegister(instr->value()),
                            TempRegister(),
                            TempRegister());
   return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
@@ -1824,14 +1821,14 @@
   LOperand* global_object = UseFixed(instr->global_object(), edx);
   LOperand* value = UseFixed(instr->value(), eax);
   LStoreGlobalGeneric* result =
-      new LStoreGlobalGeneric(context, global_object, value);
+      new(zone()) LStoreGlobalGeneric(context, global_object, value);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadContextSlot(context));
+  return DefineAsRegister(new(zone()) LLoadContextSlot(context));
 }
 
 
@@ -1846,14 +1843,14 @@
     value = UseRegister(instr->value());
     temp = NULL;
   }
-  return new LStoreContextSlot(context, value, temp);
+  return new(zone()) LStoreContextSlot(context, value, temp);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   ASSERT(instr->representation().IsTagged());
   LOperand* obj = UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new LLoadNamedField(obj));
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
 }
 
 
@@ -1864,12 +1861,12 @@
   if (instr->need_generic()) {
     LOperand* obj = UseFixed(instr->object(), eax);
     LLoadNamedFieldPolymorphic* result =
-        new LLoadNamedFieldPolymorphic(context, obj);
+        new(zone()) LLoadNamedFieldPolymorphic(context, obj);
     return MarkAsCall(DefineFixed(result, eax), instr);
   } else {
     LOperand* obj = UseRegisterAtStart(instr->object());
     LLoadNamedFieldPolymorphic* result =
-        new LLoadNamedFieldPolymorphic(context, obj);
+        new(zone()) LLoadNamedFieldPolymorphic(context, obj);
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
@@ -1878,7 +1875,7 @@
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object = UseFixed(instr->object(), eax);
-  LLoadNamedGeneric* result = new LLoadNamedGeneric(context, object);
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1886,21 +1883,21 @@
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
-      new LLoadFunctionPrototype(UseRegister(instr->function()),
-                                 TempRegister())));
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
+                                         TempRegister())));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadElements(input));
+  return DefineAsRegister(new(zone()) LLoadElements(input));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
     HLoadExternalArrayPointer* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadExternalArrayPointer(input));
+  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
 }
 
 
@@ -1910,7 +1907,7 @@
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1922,7 +1919,7 @@
   LOperand* elements = UseRegisterAtStart(instr->elements());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastDoubleElement* result =
-      new LLoadKeyedFastDoubleElement(elements, key);
+      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1942,7 +1939,7 @@
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new LLoadKeyedSpecializedArrayElement(external_pointer,
+      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
                                             key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
@@ -1958,7 +1955,8 @@
   LOperand* object = UseFixed(instr->object(), edx);
   LOperand* key = UseFixed(instr->key(), eax);
 
-  LLoadKeyedGeneric* result = new LLoadKeyedGeneric(context, object, key);
+  LLoadKeyedGeneric* result =
+      new(zone()) LLoadKeyedGeneric(context, object, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1978,7 +1976,7 @@
       ? UseTempRegister(instr->key())
       : UseRegisterOrConstantAtStart(instr->key());
 
-  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+  return AssignEnvironment(new(zone()) LStoreKeyedFastElement(obj, key, val));
 }
 
 
@@ -1992,7 +1990,7 @@
   LOperand* val = UseTempRegister(instr->value());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
 
-  return new LStoreKeyedFastDoubleElement(elements, key, val);
+  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
 }
 
 
@@ -2022,9 +2020,9 @@
     val = UseRegister(instr->value());
   }
 
-  return new LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                key,
-                                                val);
+  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                        key,
+                                                        val);
 }
 
 
@@ -2039,7 +2037,7 @@
   ASSERT(instr->value()->representation().IsTagged());
 
   LStoreKeyedGeneric* result =
-      new LStoreKeyedGeneric(context, object, key, value);
+      new(zone()) LStoreKeyedGeneric(context, object, key, value);
   return MarkAsCall(result, instr);
 }
 
@@ -2052,14 +2050,16 @@
     LOperand* new_map_reg = TempRegister();
     LOperand* temp_reg = TempRegister();
     LTransitionElementsKind* result =
-        new LTransitionElementsKind(object, new_map_reg, temp_reg);
+        new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
     return DefineSameAsFirst(result);
   } else {
     LOperand* object = UseFixed(instr->object(), eax);
     LOperand* fixed_object_reg = FixedTemp(edx);
     LOperand* new_map_reg = FixedTemp(ebx);
     LTransitionElementsKind* result =
-        new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+        new(zone()) LTransitionElementsKind(object,
+                                            new_map_reg,
+                                            fixed_object_reg);
     return MarkAsCall(DefineFixed(result, eax), instr);
   }
 }
@@ -2087,7 +2087,7 @@
       ? TempRegister()
       : NULL;
 
-  return new LStoreNamedField(obj, val, temp);
+  return new(zone()) LStoreNamedField(obj, val, temp);
 }
 
 
@@ -2096,7 +2096,8 @@
   LOperand* object = UseFixed(instr->object(), edx);
   LOperand* value = UseFixed(instr->value(), eax);
 
-  LStoreNamedGeneric* result = new LStoreNamedGeneric(context, object, value);
+  LStoreNamedGeneric* result =
+      new(zone()) LStoreNamedGeneric(context, object, value);
   return MarkAsCall(result, instr);
 }
 
@@ -2105,7 +2106,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseOrConstantAtStart(instr->left());
   LOperand* right = UseOrConstantAtStart(instr->right());
-  LStringAdd* string_add = new LStringAdd(context, left, right);
+  LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
   return MarkAsCall(DefineFixed(string_add, eax), instr);
 }
 
@@ -2114,7 +2115,8 @@
   LOperand* string = UseTempRegister(instr->string());
   LOperand* index = UseTempRegister(instr->index());
   LOperand* context = UseAny(instr->context());
-  LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
@@ -2122,38 +2124,43 @@
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
   LOperand* context = UseAny(instr->context());
-  LStringCharFromCode* result = new LStringCharFromCode(context, char_code);
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
   LOperand* string = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LStringLength(string));
+  return DefineAsRegister(new(zone()) LStringLength(string));
 }
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LArrayLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LObjectLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LRegExpLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LFunctionLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
 }
 
 
@@ -2161,7 +2168,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object = UseAtStart(instr->object());
   LOperand* key = UseOrConstantAtStart(instr->key());
-  LDeleteProperty* result = new LDeleteProperty(context, object, key);
+  LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2169,13 +2176,13 @@
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new LOsrEntry);
+  return AssignEnvironment(new(zone()) LOsrEntry);
 }
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new LParameter, spill_index);
+  return DefineAsSpilled(new(zone()) LParameter, spill_index);
 }
 
 
@@ -2185,14 +2192,14 @@
     Abort("Too many spill slots needed for OSR");
     spill_index = 0;
   }
-  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallStub* result = new LCallStub(context);
+  LCallStub* result = new(zone()) LCallStub(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2210,14 +2217,15 @@
   LOperand* arguments = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = Use(instr->index());
-  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+  LAccessArgumentsAt* result =
+      new(zone()) LAccessArgumentsAt(arguments, length, index);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
   LOperand* object = UseFixed(instr->value(), eax);
-  LToFastProperties* result = new LToFastProperties(object);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2225,19 +2233,19 @@
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseAtStart(instr->value());
-  LTypeof* result = new LTypeof(context, value);
+  LTypeof* result = new(zone()) LTypeof(context, value);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
     HIsConstructCallAndBranch* instr) {
-  return new LIsConstructCallAndBranch(TempRegister());
+  return new(zone()) LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2261,7 +2269,7 @@
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
     ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
-    LLazyBailout* lazy_bailout = new LLazyBailout;
+    LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
     LInstruction* result = AssignEnvironment(lazy_bailout);
     instruction_pending_deoptimization_environment_->
         set_deoptimization_environment(result->environment());
@@ -2276,11 +2284,12 @@
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
   if (instr->is_function_entry()) {
     LOperand* context = UseFixed(instr->context(), esi);
-    return MarkAsCall(new LStackCheck(context), instr);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
     ASSERT(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
-    return AssignEnvironment(AssignPointerMap(new LStackCheck(context)));
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
   }
 }
 
@@ -2309,7 +2318,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* key = UseOrConstantAtStart(instr->key());
   LOperand* object = UseOrConstantAtStart(instr->object());
-  LIn* result = new LIn(context, key, object);
+  LIn* result = new(zone()) LIn(context, key, object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
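
Aside on the pattern: every hunk in the lithium-ia32.cc diff above swaps a bare "new LFoo(...)" for "new(zone()) LFoo(...)", i.e. placement new into the compiler's zone (arena) allocator, so Lithium instructions are bump-allocated and discarded wholesale when compilation finishes. The sketch below shows the idiom in isolation; the Zone class is a simplified stand-in, not V8's actual allocator, and LGoto is reduced to a stub.

#include <cstddef>
#include <cstdlib>

// Simplified bump-pointer arena. The real Zone grows by chaining segments
// and releases everything at once when the zone is torn down.
class Zone {
 public:
  explicit Zone(std::size_t capacity)
      : base_(static_cast<char*>(std::malloc(capacity))), top_(base_) {}
  ~Zone() { std::free(base_); }

  void* New(std::size_t size) {
    void* result = top_;
    top_ += (size + 7) & ~static_cast<std::size_t>(7);  // 8-byte alignment.
    return result;  // No per-object free; the whole zone dies together.
  }

 private:
  char* base_;
  char* top_;
};

// These overloads are what make "new(zone) T(...)" legal C++. The delete
// overload is only invoked if a constructor throws.
void* operator new(std::size_t size, Zone* zone) { return zone->New(size); }
void operator delete(void*, Zone*) {}

struct LGoto {  // Stub standing in for a Lithium instruction.
  explicit LGoto(int block_id) : block_id_(block_id) {}
  int block_id_;
};

int main() {
  Zone zone(1 << 16);
  LGoto* instr = new(&zone) LGoto(42);  // Same shape as new(zone()) LGoto(...).
  return instr->block_id_ == 42 ? 0 : 1;
}

The payoff is visible in the diff itself: the chunk builder never deletes an instruction, and each allocation is a pointer bump instead of a heap round trip.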
 
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 5f23afa..98487b4 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -192,8 +192,8 @@
   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
   virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream) = 0;
-  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
 
   enum Opcode {
     // Declare a unique enum value for each instruction.
@@ -289,9 +289,6 @@
   int TempCount() { return T; }
   LOperand* TempAt(int i) { return temps_[i]; }
 
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
-
  protected:
   EmbeddedContainer<LOperand*, R> results_;
   EmbeddedContainer<LOperand*, I> inputs_;
@@ -799,18 +796,15 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
+  LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return hydrogen()->op(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
-  Token::Value op_;
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
 };
 
 
@@ -2169,6 +2163,7 @@
       : chunk_(NULL),
         info_(info),
         graph_(graph),
+        isolate_(graph->isolate()),
         status_(UNUSED),
         current_instruction_(NULL),
         current_block_(NULL),
@@ -2198,6 +2193,7 @@
   LChunk* chunk() const { return chunk_; }
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
+  Zone* zone() { return isolate_->zone(); }
 
   bool is_unused() const { return status_ == UNUSED; }
   bool is_building() const { return status_ == BUILDING; }
@@ -2300,7 +2296,6 @@
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
@@ -2310,6 +2305,7 @@
   LChunk* chunk_;
   CompilationInfo* info_;
   HGraph* const graph_;
+  Isolate* isolate_;
   Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
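
The LBitI hunk above is the header-side half of the DoBit-to-DoBitwise consolidation: rather than each bit instruction carrying its own Token::Value copy, op() now delegates to the underlying hydrogen instruction through DECLARE_HYDROGEN_ACCESSOR. Schematically, with hypothetical scaffolding in place of V8's macros:

enum class Token { BIT_AND, BIT_OR, BIT_XOR };

// High-level (hydrogen) node: the single source of truth for the operator.
class HBitwise {
 public:
  explicit HBitwise(Token op) : op_(op) {}
  Token op() const { return op_; }
 private:
  Token op_;
};

// Low-level (lithium) node: the duplicated op_ field is gone. In V8 the
// hydrogen pointer is attached by the chunk builder; it is passed directly
// here only to keep the sketch self-contained.
class LBitI {
 public:
  explicit LBitI(HBitwise* hydrogen) : hydrogen_(hydrogen) {}
  Token op() const { return hydrogen_->op(); }  // The accessor's effect.
 private:
  HBitwise* hydrogen_;
};

One DoBitwise handler can then emit the same LBitI for AND, OR, and XOR, which is exactly what the lithium-ia32.cc hunk does.
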
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index dd1ace9..1676a70 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1594,33 +1594,12 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
-  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub) {
   ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
-  return result;
-}
-
-
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
@@ -1674,12 +1653,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
-                                            int num_arguments) {
-  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // If the expected number of arguments of the runtime function is
@@ -1701,26 +1674,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
-                                            int num_arguments) {
-  if (f->nargs >= 0 && f->nargs != num_arguments) {
-    IllegalOperation(num_arguments);
-    // Since we did not call the stub, there was no allocation failure.
-    // Return some non-failure object.
-    return isolate()->heap()->undefined_value();
-  }
-
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(eax, Immediate(num_arguments));
-  mov(ebx, Immediate(ExternalReference(f, isolate())));
-  CEntryStub ces(1);
-  return TryCallStub(&ces);
-}
-
-
 void MacroAssembler::CallExternalReference(ExternalReference ref,
                                            int num_arguments) {
   mov(eax, Immediate(num_arguments));
@@ -1743,17 +1696,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(eax, Immediate(num_arguments));
-  return TryJumpToExternalReference(ext);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -1763,14 +1705,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
-                                                int num_arguments,
-                                                int result_size) {
-  return TryTailCallExternalReference(
-      ExternalReference(fid, isolate()), num_arguments, result_size);
-}
-
-
 // If true, a Handle<T> returned by value from a function with cdecl calling
 // convention will be returned directly as a value of location_ field in a
 // register eax.
@@ -1819,8 +1753,8 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
-                                                         int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+                                              int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   ExternalReference limit_address =
@@ -1833,8 +1767,8 @@
   mov(edi, Operand::StaticVariable(limit_address));
   add(Operand::StaticVariable(level_address), Immediate(1));
 
-  // Call the api function!
-  call(function->address(), RelocInfo::RUNTIME_ENTRY);
+  // Call the api function.
+  call(function_address, RelocInfo::RUNTIME_ENTRY);
 
   if (!kReturnHandlesDirectly) {
     // PrepareCallApiFunction saved pointer to the output slot into
@@ -1872,11 +1806,8 @@
   LeaveApiExitFrame();
   ret(stack_space * kPointerSize);
   bind(&promote_scheduled_exception);
-  MaybeObject* result =
-      TryTailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+
   bind(&empty_handle);
   // It was zero; the result is undefined.
   mov(eax, isolate()->factory()->undefined_value());
@@ -1893,8 +1824,6 @@
   call(eax);
   mov(eax, edi);
   jmp(&leave_exit_frame);
-
-  return result;
 }
 
 
@@ -1906,15 +1835,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& ext) {
-  // Set the entry point and jump to the C entry runtime stub.
-  mov(ebx, Immediate(ext));
-  CEntryStub ces(1);
-  return TryTailCallStub(&ces);
-}
-
-
 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
   // This macro takes the dst register to make the code more readable
   // at the call sites. However, the dst register has to be ecx to
@@ -2079,7 +1999,7 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
@@ -2088,7 +2008,7 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  mov(edi, Immediate(Handle<JSFunction>(function)));
+  mov(edi, Immediate(function));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   ParameterCount expected(function->shared()->formal_parameter_count());
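
All of the Try* helpers deleted above follow one pre-handlification error protocol: instead of allocating (and possibly triggering a GC), they return a MaybeObject that may be a retry-after-GC failure, and every caller must check and propagate it. A distilled sketch of that calling convention, using toy types rather than V8's real MaybeObject/Failure machinery:

// Toy model of the retry-after-GC protocol the deleted helpers used.
struct MaybeObject {
  bool failure;  // True means "this allocation needs a GC; retry later".
  void* object;  // Meaningful only when !failure.

  // Mirrors the "if (!maybe->ToObject(&result)) return maybe;" idiom in
  // the removed TryCallStub / TryCallRuntime bodies.
  bool ToObject(void** out) {
    if (failure) return false;
    *out = object;
    return true;
  }
};

MaybeObject TryGetCode() {            // Stand-in: fail instead of GC'ing.
  return MaybeObject{true, nullptr};
}

MaybeObject TryCallStub() {
  void* code = nullptr;
  MaybeObject maybe = TryGetCode();
  if (!maybe.ToObject(&code)) return maybe;  // Propagate the failure.
  // ... emit the call to code ...
  return maybe;
}

Once the call sites are handlified and therefore allowed to GC, the failure threading becomes unnecessary, so the commit keeps only the plain void counterparts.
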
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 8528c55..68a14bb 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -278,7 +278,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
@@ -632,19 +632,9 @@
   // Call a code stub.  Generate the code if necessary.
   void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
   // Tail call a code stub (jump).  Generate the code if necessary.
   void TailCallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -652,19 +642,9 @@
   void CallRuntime(const Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
-  // Call a runtime function, returning the CodeStub object called.
-  // Try to generate the stub code if necessary.  Do not perform a GC
-  // but instead return a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
-                                              int num_arguments);
-
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
-  // Convenience function: Same as above, but takes the fid instead.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
-                                              int num_arguments);
-
   // Convenience function: call an external reference.
   void CallExternalReference(ExternalReference ref, int num_arguments);
 
@@ -675,23 +655,11 @@
                                  int num_arguments,
                                  int result_size);
 
-  // Tail call of a runtime routine (jump). Try to generate the code if
-  // necessary. Do not perform a GC but instead return a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
-  // Convenience function: tail call a runtime routine (jump). Try to generate
-  // the code if necessary. Do not perform a GC but instead return a retry after
-  // GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
-                                                  int num_arguments,
-                                                  int result_size);
-
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, arguments must be stored in esp[0], esp[4],
   // etc., not pushed. The argument count assumes all arguments are word sized.
@@ -716,19 +684,15 @@
   // stores the pointer to the reserved slot into esi.
   void PrepareCallApiFunction(int argc);
 
-  // Calls an API function. Allocates HandleScope, extracts
-  // returned value from handle and propagates exceptions.
-  // Clobbers ebx, edi and caller-save registers. Restores context.
-  // On return removes stack_space * kPointerSize (GCed).
-  MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
-                                           int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Clobbers ebx, edi and
+  // caller-save registers.  Restores context.  On return removes
+  // stack_space * kPointerSize (GCed).
+  void CallApiFunctionAndReturn(Address function_address, int stack_space);
 
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& ext);
 
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
-
   // ---------------------------------------------------------------------------
   // Utilities
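
All of the Try* entry points deleted from this header implemented the same
retry-after-GC protocol: code generation was not allowed to trigger a
collection, so any allocation had to surface a failure object that every
caller checked and propagated by hand. With compilation now running behind
handles, the plain Call* versions may allocate freely and the protocol goes
away. A self-contained sketch of what the old protocol cost each caller
(simplified stand-ins, not V8's MaybeObject API):

#include <cstdio>

// Simplified stand-ins for the MaybeObject protocol (illustrative only).
struct Object {};
struct MaybeObject {
  Object* value;    // non-null on success
  bool is_failure;  // true when the operation must be retried after GC
  bool IsFailure() const { return is_failure; }
  bool ToObject(Object** out) const {
    if (is_failure) return false;
    *out = value;
    return true;
  }
};

static Object heap_object;

// An allocation that may fail; callers must never assume success.
MaybeObject TryAllocate(bool gc_needed) {
  if (gc_needed) return MaybeObject{nullptr, true};  // retry after GC
  return MaybeObject{&heap_object, false};
}

// Every caller repeats the same check-and-propagate boilerplate, which is
// exactly what the handlified Call* replacements eliminate.
MaybeObject TryCompileStub(bool gc_needed) {
  Object* code = nullptr;
  MaybeObject maybe_code = TryAllocate(gc_needed);
  if (!maybe_code.ToObject(&code)) return maybe_code;  // propagate failure
  return MaybeObject{code, false};
}

int main() {
  printf("success: %d\n", !TryCompileStub(false).IsFailure());
  printf("failure: %d\n", TryCompileStub(true).IsFailure());
  return 0;
}
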
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index af53acd..3089a69 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -153,59 +153,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-static MaybeObject* TryGenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                                        Label* miss_label,
-                                                        Register receiver,
-                                                        String* name,
-                                                        Register r0,
-                                                        Register r1) {
-  ASSERT(name->IsSymbol());
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
-  __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
-            kInterceptorOrAccessCheckNeededMask);
-  __ j(not_zero, miss_label);
-
-  // Check that receiver is a JSObject.
-  __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
-  __ j(below, miss_label);
-
-  // Load properties array.
-  Register properties = r0;
-  __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
-  // Check that the properties array is a dictionary.
-  __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->hash_table_map()));
-  __ j(not_equal, miss_label);
-
-  Label done;
-  MaybeObject* result =
-      StringDictionaryLookupStub::TryGenerateNegativeLookup(masm,
-                                                            miss_label,
-                                                            &done,
-                                                            properties,
-                                                            name,
-                                                            r1);
-  if (result->IsFailure()) return result;
-
-  __ bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1);
-
-  return result;
-}
-
-
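
The helper deleted above exists only to prove a negative: that name is
definitely absent from the receiver's property dictionary, so the compiled
stub may skip the slow lookup entirely. Stripped of the assembler detail,
the check amounts to the following (std::unordered_set standing in for
V8's StringDictionary; a sketch, not the real data structure):

#include <cstdio>
#include <string>
#include <unordered_set>

// Negative lookup: succeed only when the property is provably absent.
bool NegativeLookup(const std::unordered_set<std::string>& dictionary,
                    const std::string& name) {
  return dictionary.find(name) == dictionary.end();
}

int main() {
  std::unordered_set<std::string> properties = {"length", "name"};
  printf("'foo' absent:  %d\n", NegativeLookup(properties, "foo"));   // 1
  printf("'name' absent: %d\n", NegativeLookup(properties, "name"));  // 0
  return 0;
}
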
 void StubCache::GenerateProbe(MacroAssembler* masm,
                               Code::Flags flags,
                               Register receiver,
@@ -274,14 +221,17 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   // Check we're still in the same context.
   __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
          masm->isolate()->global());
   __ j(not_equal, miss);
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(masm->isolate()->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(masm->isolate()->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
   // Load the prototype from the initial map.
@@ -396,12 +346,12 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
   Register scratch = name;
-  __ mov(scratch, Immediate(Handle<Object>(interceptor)));
+  __ mov(scratch, Immediate(interceptor));
   __ push(scratch);
   __ push(receiver);
   __ push(holder);
@@ -409,11 +359,12 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallExternalReference(
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
@@ -460,9 +411,9 @@
 
 
 // Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
-                                        const CallOptimization& optimization,
-                                        int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                int argc) {
   // ----------- S t a t e -------------
   //  -- esp[0]              : return address
   //  -- esp[4]              : object passing the type check
@@ -477,30 +428,25 @@
   //  -- esp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ mov(edi, Immediate(Handle<JSFunction>(function)));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ mov(edi, Immediate(function));
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
   __ mov(Operand(esp, 2 * kPointerSize), edi);
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ mov(ecx, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ mov(ecx, api_call_info);
     __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
     __ mov(Operand(esp, 3 * kPointerSize), ebx);
   } else {
-    __ mov(Operand(esp, 3 * kPointerSize),
-           Immediate(Handle<Object>(call_data)));
+    __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
   }
 
   // Prepare arguments.
   __ lea(eax, Operand(esp, 3 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
   const int kApiArgc = 1;  // API function gets reference to the v8::Arguments.
 
   // Allocate the v8::Arguments structure in the arguments' space since
@@ -520,12 +466,10 @@
   __ lea(eax, ApiParameterOperand(1));
   __ mov(ApiParameterOperand(0), eax);
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm->TryCallApiFunctionAndReturn(&fun,
-                                           argc + kFastApiCallArguments + 1);
+  // Function address is a foreign pointer outside V8's heap.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  __ CallApiFunctionAndReturn(function_address,
+                              argc + kFastApiCallArguments + 1);
 }
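
The rewritten tail of GenerateFastApiCall takes a bare Address rather than
an ApiFunction wrapper because, as the new comment says, the callback is a
foreign pointer outside V8's heap: it can never move, so it needs neither a
handle nor relocation bookkeeping. A sketch of why a raw address suffices
(hypothetical Add callback, simplified Address alias):

#include <cstdint>
#include <cstdio>

typedef uint8_t* Address;  // byte-pointer alias in the spirit of V8's Address

int Add(int a, int b) { return a + b; }

int main() {
  // The callback is ordinary C++ code at a fixed address, so it can be
  // baked straight into a generated call sequence and round-tripped freely.
  Address function_address = reinterpret_cast<Address>(&Add);
  int (*callback)(int, int) =
      reinterpret_cast<int (*)(int, int)>(function_address);
  printf("callback result: %d\n", callback(20, 22));
  return 0;
}
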
 
 
@@ -540,16 +484,16 @@
         name_(name),
         extra_state_(extra_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -557,45 +501,27 @@
     __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();  // Success.
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -604,16 +530,14 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 =
-          optimization.GetPrototypeDepthOfExpectedType(object,
-                                                       interceptor_holder);
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 =
-            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                         lookup->holder());
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
       }
-      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                             (depth2 != kInvalidProtoDepth);
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
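
can_do_fast_api_call hinges on GetPrototypeDepthOfExpectedType locating the
expected holder within a bounded number of prototype hops; kInvalidProtoDepth
means the fast path cannot be proven safe and the stub falls back to the
regular call. Roughly (a sketch with plain structs; the concrete sentinel
value is illustrative):

#include <cstdio>

const int kInvalidProtoDepth = -1;  // illustrative sentinel

struct JSObject { JSObject* prototype; };

// Count prototype hops from object to holder, or report failure when the
// holder is not on the chain at all.
int GetPrototypeDepth(JSObject* object, JSObject* holder) {
  int depth = 0;
  for (JSObject* current = object; current != nullptr;
       current = current->prototype) {
    if (current == holder) return depth;
    ++depth;
  }
  return kInvalidProtoDepth;
}

int main() {
  JSObject holder{nullptr};
  JSObject receiver{&holder};
  printf("depth:   %d\n", GetPrototypeDepth(&receiver, &holder));  // 1
  printf("missing: %d\n", GetPrototypeDepth(&holder, &receiver));  // -1
  return 0;
}
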
 
     Counters* counters = masm->isolate()->counters();
@@ -629,9 +553,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver,
-                                        interceptor_holder, scratch1,
-                                        scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -644,10 +568,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -658,9 +583,7 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result =
-          GenerateFastApiCall(masm, optimization, arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
           ? CALL_AS_FUNCTION
@@ -681,33 +604,27 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
     }
-
-    return masm->isolate()->heap()->undefined_value();  // Success.
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
     FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -723,7 +640,7 @@
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Label* interceptor_succeeded) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
@@ -762,10 +679,9 @@
 
 
 void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
-  Code* code = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  Handle<Code> ic(code);
-  __ jmp(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
 
@@ -883,33 +799,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
-  ASSERT(cell->value()->IsTheHole());
-  if (Serializer::enabled()) {
-    __ mov(scratch, Immediate(Handle<Object>(cell)));
-    __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-           Immediate(masm->isolate()->factory()->the_hole_value()));
-  } else {
-    __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
-           Immediate(masm->isolate()->factory()->the_hole_value()));
-  }
-  __ j(not_equal, miss);
-  return cell;
-}
-
-
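
The deleted helper's job survives in GenerateCheckPropertyCells below: for
every global object skipped along the prototype chain, the stub must
re-verify that the property's cell still holds the hole, i.e. that the
property was never installed on the global after the stub was compiled.
The invariant, reduced to a sketch (toy values, not V8's heap layout):

#include <cstdio>

struct Value { int payload; };
static Value the_hole{0};  // stand-in for the heap's hole sentinel

struct PropertyCell { Value* value; };

// The cached negative lookup stays valid only while the cell is empty.
bool CellStillHoldsTheHole(const PropertyCell& cell) {
  return cell.value == &the_hole;
}

int main() {
  PropertyCell empty{&the_hole};
  Value forty_two{42};
  PropertyCell installed{&forty_two};
  printf("empty ok: %d, installed ok: %d\n", CellStillHoldsTheHole(empty),
         CellStillHoldsTheHole(installed));
  return 0;
}
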
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
 static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -931,35 +820,6 @@
   }
 }
 
-
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
-    if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = TryGenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
-    }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
-  }
-  return NULL;
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -1076,144 +936,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-Register StubCompiler::CheckPrototypes(JSObject* object,
-                                       Register object_reg,
-                                       JSObject* holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       String* name,
-                                       int save_at_depth,
-                                       Label* miss) {
-  // Make sure there's no overlap between holder and object registers.
-  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
-  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
-         && !scratch2.is(scratch1));
-
-  // Keep track of the current object in register reg.
-  Register reg = object_reg;
-  JSObject* current = object;
-  int depth = 0;
-
-  if (save_at_depth == depth) {
-    __ mov(Operand(esp, kPointerSize), reg);
-  }
-
-  // Traverse the prototype chain and check the maps in the prototype chain for
-  // fast and global objects or do negative lookup for normal objects.
-  while (current != holder) {
-    depth++;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
-    ASSERT(current->GetPrototype()->IsJSObject());
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
-    if (!current->HasFastProperties() &&
-        !current->IsJSGlobalObject() &&
-        !current->IsJSGlobalProxy()) {
-      if (!name->IsSymbol()) {
-        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
-        Object* lookup_result = NULL;  // Initialization to please compiler.
-        if (!maybe_lookup_result->ToObject(&lookup_result)) {
-          set_failure(Failure::cast(maybe_lookup_result));
-          return reg;
-        }
-        name = String::cast(lookup_result);
-      }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
-             StringDictionary::kNotFound);
-
-      MaybeObject* negative_lookup =
-          TryGenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                              scratch1, scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
-
-      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      __ cmp(scratch1, Immediate(Handle<Map>(current->map())));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
-        // Restore scratch register to be the map of the object.
-        // We load the prototype from the map in the scratch register.
-        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      // Check the map of the current object.
-      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-             Immediate(Handle<Map>(current->map())));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-      }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, Handle<JSObject>(prototype));
-    }
-
-    if (save_at_depth == depth) {
-      __ mov(Operand(esp, kPointerSize), reg);
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-  }
-  ASSERT(current == holder);
-
-  // Log the check depth.
-  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  // Check the holder map.
-  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(holder->map())));
-  __ j(not_equal, miss);
-
-  // Perform security check for access to the global object.
-  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-  if (holder->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
-
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-  MaybeObject* result = TryGenerateCheckPropertyCells(masm(),
-                                                      object,
-                                                      holder,
-                                                      name,
-                                                      scratch1,
-                                                      miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
-
-  // Return the register containing the holder.
-  return reg;
-}
-
-
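
This block deletes the last raw-pointer twin of CheckPrototypes now that the
handlified version carries all callers. Its shape is still the key to the
code below: walk the chain from receiver to holder, compare each object's
map against the one recorded at compile time, and fall through to the miss
label on any mismatch. As a sketch (plain structs; the real code also checks
the holder's own map and handles global proxies):

#include <cstdio>
#include <vector>

struct Map { int id; };
struct JSObject { Map* map; JSObject* prototype; };

// True only if every object from receiver up to (but not including) holder
// still has the map that was recorded when the stub was compiled.
bool CheckPrototypes(JSObject* object, JSObject* holder,
                     const std::vector<Map*>& expected_maps) {
  size_t depth = 0;
  for (JSObject* current = object; current != holder;
       current = current->prototype, ++depth) {
    if (current == nullptr || depth >= expected_maps.size()) return false;
    if (current->map != expected_maps[depth]) return false;  // map changed
  }
  return true;
}

int main() {
  Map receiver_map{0}, holder_map{1};
  JSObject holder{&holder_map, nullptr};
  JSObject receiver{&receiver_map, &holder};
  printf("chain ok: %d\n",
         CheckPrototypes(&receiver, &holder, {&receiver_map}));
  return 0;
}
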
 void StubCompiler::GenerateLoadField(Handle<JSObject> object,
                                      Handle<JSObject> holder,
                                      Register receiver,
@@ -1236,25 +958,22 @@
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1,
-                      scratch2, scratch3, name, miss);
-
-  Handle<AccessorInfo> callback_handle(callback);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Insert additional parameters into the stack frame above return address.
   ASSERT(!scratch3.is(reg));
@@ -1265,11 +984,11 @@
   ASSERT(!scratch2.is(reg));
   __ push(reg);  // holder
   // Push data from AccessorInfo.
-  if (isolate()->heap()->InNewSpace(callback_handle->data())) {
-    __ mov(scratch1, Immediate(callback_handle));
+  if (isolate()->heap()->InNewSpace(callback->data())) {
+    __ mov(scratch1, Immediate(callback));
     __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
   } else {
-    __ push(Immediate(Handle<Object>(callback_handle->data())));
+    __ push(Immediate(Handle<Object>(callback->data())));
   }
 
   // Save a pointer to where we pushed the arguments pointer.
@@ -1281,10 +1000,6 @@
 
   __ push(scratch3);  // Restore return address.
 
-  // Do call through the api.
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
   // 3-element array for v8::Arguments::values_, handle for name and pointer
   // to the values (it is considered a smi by the GC).
   const int kStackSpace = 5;
@@ -1298,8 +1014,5 @@
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  __ CallApiFunctionAndReturn(getter_address, kStackSpace);
 }
 
 
@@ -1325,15 +1041,15 @@
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1349,9 +1065,9 @@
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+               lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1405,10 +1121,10 @@
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into holder_reg.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1427,9 +1143,8 @@
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1438,7 +1153,7 @@
       __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(holder_reg);
-      __ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
+      __ mov(holder_reg, Immediate(callback));
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
       __ push(holder_reg);
       __ push(name_reg);
@@ -1476,9 +1191,9 @@
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1491,7 +1206,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(edx, miss);
   }
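
The comparison rewrite above is easy to misread: with handles, comparing
object and holder directly would compare handle slots, not heap objects, so
identity checks go through is_identical_to (or dereference first, as the
*interceptor_holder != lookup->holder() checks elsewhere in this patch do).
A toy illustration, not V8's Handle:

#include <cassert>
#include <cstdio>

template <typename T>
struct Handle {
  T** slot;
  T* operator*() const { return *slot; }
  bool is_identical_to(const Handle& other) const {
    return *slot == *other.slot;
  }
};

struct JSObject {};

int main() {
  JSObject object;
  JSObject* slot_a = &object;
  JSObject* slot_b = &object;
  Handle<JSObject> a{&slot_a};
  Handle<JSObject> b{&slot_b};
  assert(a.slot != b.slot);      // distinct slots...
  assert(a.is_identical_to(b));  // ...same underlying object
  printf("identical: %d\n", a.is_identical_to(b));
  return 0;
}
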
 
@@ -1500,19 +1215,20 @@
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
   if (Serializer::enabled()) {
-    __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(edi, Immediate(cell));
     __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
   } else {
-    __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(edi, Operand::Cell(cell));
   }
 
   // Check that the cell contains the same function.
-  if (isolate()->heap()->InNewSpace(function)) {
+  if (isolate()->heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1525,11 +1241,10 @@
     // Check the shared function info. Make sure it hasn't changed.
     __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
            Immediate(Handle<SharedFunctionInfo>(function->shared())));
-    __ j(not_equal, miss);
   } else {
-    __ cmp(edi, Immediate(Handle<JSFunction>(function)));
-    __ j(not_equal, miss);
+    __ cmp(edi, Immediate(function));
   }
+  __ j(not_equal, miss);
 }
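
Folding the two miss jumps into a single __ j(not_equal, miss) is safe
because both arms of the if end in a cmp, so the flags are set either way.
The reason the arms compare different things is the embedding rule the
comment above describes: a new-space pointer cannot be baked into code, so
the stub pins down the old-space SharedFunctionInfo that every closure of
the same function shares. Sketch (toy types; in_new_space stands in for
Heap::InNewSpace):

#include <cstdio>

struct SharedFunctionInfo { int id; };
struct JSFunction {
  SharedFunctionInfo* shared;
  bool in_new_space;
};

// Does the cell still hold the function this stub was specialized for?
bool CellHoldsFunction(const JSFunction* in_cell, const JSFunction* expected) {
  if (expected->in_new_space) {
    return in_cell->shared == expected->shared;  // compare shared info
  }
  return in_cell == expected;  // stable pointer: compare directly
}

int main() {
  SharedFunctionInfo shared{1};
  JSFunction closure_a{&shared, true};
  JSFunction closure_b{&shared, true};
  printf("same shared info: %d\n",
         CellHoldsFunction(&closure_a, &closure_b));
  return 0;
}
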
 
 
@@ -1542,20 +1257,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGenerateMissBranch() {
-  MaybeObject* maybe_obj =
-      isolate()->stub_cache()->TryComputeCallMiss(arguments().immediate(),
-                                                  kind_,
-                                                  extra_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
-}
-
-
 Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
                                                 Handle<JSObject> holder,
                                                 int index,
@@ -1612,11 +1313,12 @@
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1626,13 +1328,13 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) {
-    return isolate()->heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
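
From here on, every custom-call compiler signals "not applicable, use the
generic path" with Handle<Code>::null() instead of the heap's undefined
value; the consumer side appears in CompileCallConstant at the end of this
diff (a null result falls through to the regular compiler). A toy version
of the sentinel (illustrative Handle, not V8's):

#include <cstdio>

template <typename T>
struct Handle {
  T* ptr;
  static Handle null() { return Handle{nullptr}; }
  bool is_null() const { return ptr == nullptr; }
};

struct Code { const char* name; };

// Returns a compiled stub, or a null handle to request the generic path.
Handle<Code> CompileCustomCall(bool applicable) {
  static Code code{"custom-stub"};
  if (!applicable) return Handle<Code>::null();  // bail out
  return Handle<Code>{&code};
}

int main() {
  Handle<Code> code = CompileCustomCall(false);
  if (code.is_null()) {
    printf("falling back to the regular compiler\n");
  }
  return 0;
}
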
 
   Label miss;
 
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -1641,9 +1343,8 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(edx, &miss);
 
-  CheckPrototypes(JSObject::cast(object), edx,
-                  holder, ebx,
-                  eax, edi, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                  name, &miss);
 
   if (argc == 0) {
     // Noop, return the length.
@@ -1705,8 +1406,8 @@
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ mov(Operand(edx, 0), ecx);
 
-      __ RecordWrite(
-          ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+      __ RecordWrite(ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
 
       __ ret((argc + 1) * kPointerSize);
 
@@ -1789,19 +1490,19 @@
   }
 
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1811,13 +1512,13 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) {
-    return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
 
   Label miss, return_undefined, call_builtin;
 
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -1825,9 +1526,8 @@
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(edx, &miss);
-  CheckPrototypes(JSObject::cast(object), edx,
-                  holder, ebx,
-                  eax, edi, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                  name, &miss);
 
   // Get the elements array of the object.
   __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1872,20 +1572,19 @@
       1);
 
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -1895,8 +1594,8 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) {
-    return isolate()->heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
 
   const int argc = arguments().immediate();
@@ -1912,16 +1611,16 @@
     index_out_of_range_label = &miss;
   }
 
-  GenerateNameCheck(Handle<String>(name), &name_miss);
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             eax,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                  ebx, edx, edi, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  eax, holder, ebx, edx, edi, name, &miss);
 
   Register receiver = ebx;
   Register index = edi;
@@ -1934,19 +1633,19 @@
     __ Set(index, Immediate(factory()->undefined_value()));
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      scratch,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1956,22 +1655,21 @@
 
   __ bind(&miss);
   // Restore function name in ecx.
-  __ Set(ecx, Immediate(Handle<String>(name)));
+  __ Set(ecx, Immediate(name));
   __ bind(&name_miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -1981,8 +1679,8 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) {
-    return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
 
   const int argc = arguments().immediate();
@@ -1998,16 +1696,16 @@
     index_out_of_range_label = &miss;
   }
 
-  GenerateNameCheck(Handle<String>(name), &name_miss);
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             eax,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                  ebx, edx, edi, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  eax, holder, ebx, edx, edi, name, &miss);
 
   Register receiver = eax;
   Register index = edi;
@@ -2021,20 +1719,20 @@
     __ Set(index, Immediate(factory()->undefined_value()));
   }
 
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -2044,22 +1742,21 @@
 
   __ bind(&miss);
   // Restore function name in ecx.
-  __ Set(ecx, Immediate(Handle<String>(name)));
+  __ Set(ecx, Immediate(name));
   __ bind(&name_miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -2073,23 +1770,22 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   Label miss;
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
-
-    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2105,12 +1801,12 @@
   // Convert the smi code to uint16.
   __ and_(code, Immediate(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, eax);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, eax);
+  generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
@@ -2123,19 +1819,19 @@
 
   __ bind(&miss);
   // ecx: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2145,7 +1841,7 @@
   // -----------------------------------
 
   if (!CpuFeatures::IsSupported(SSE2)) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   CpuFeatures::Scope use_sse2(SSE2);
@@ -2155,23 +1851,24 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   Label miss;
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
 
-    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2252,19 +1949,19 @@
 
   __ bind(&miss);
   // ecx: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2278,23 +1975,24 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   Label miss;
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
 
-    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2356,34 +2054,33 @@
 
   __ bind(&miss);
   // ecx: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
 
-  GenerateNameCheck(Handle<String>(name), &miss_before_stack_reserved);
+  GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -2401,8 +2098,8 @@
   __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), edx, holder,
-                  ebx, eax, edi, name, depth, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                  name, depth, &miss);
 
   // Move the return address on top of the stack.
   __ mov(eax, Operand(esp, 3 * kPointerSize));
@@ -2410,27 +2107,24 @@
 
   // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
   // duplicate of return address and will be overwritten.
-  MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiCall(masm(), optimization, argc);
 
   __ bind(&miss);
   __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(
-    Object* object,
-    JSObject* holder,
-    JSFunction* function,
-    String* name,
-    CheckType check) {
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
+                                                   CheckType check) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2440,17 +2134,15 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -2464,15 +2156,13 @@
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(isolate()->counters()->call_const(), 1);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, eax, edi, name, &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
+                      edi, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2483,28 +2173,25 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         // Check that the object is a string or a symbol.
         __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, edi, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            eax, holder, ebx, edx, edi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(edx, &fast);
@@ -2514,18 +2201,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, edi, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            eax, holder, ebx, edx, edi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ cmp(edx, factory()->true_value());
@@ -2536,14 +2223,15 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, edi, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            eax, holder, ebx, edx, edi, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
-    }
-
-    default:
-      UNREACHABLE();
   }
 
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
@@ -2554,17 +2242,16 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2574,7 +2261,7 @@
   // -----------------------------------
   Label miss;
 
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
@@ -2586,17 +2273,8 @@
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
   CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         edx,
-                                         ebx,
-                                         edi,
-                                         eax,
-                                         &miss);
-  if (result->IsFailure()) return result;
+  compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
+                   &miss);
 
   // Restore receiver.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -2623,20 +2301,19 @@
 
   // Handle load cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(
-    JSObject* object,
-    GlobalObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2646,23 +2323,17 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy.
@@ -2691,11 +2362,10 @@
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(NORMAL, name);
+  return GetCode(NORMAL, name);
 }
 
 
@@ -3039,10 +2709,11 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -3050,18 +2721,13 @@
   // -----------------------------------
   Label miss;
 
-  MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
-                                             edi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback,
+                       name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(CALLBACKS, name);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -3085,9 +2751,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -3100,22 +2766,14 @@
 
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          eax,
-                          ecx,
-                          edx,
-                          ebx,
-                          edi,
-                          name,
-                          &miss);
+  GenerateLoadInterceptor(receiver, holder, &lookup, eax, ecx, edx, ebx, edi,
+                          name, &miss);
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -3202,11 +2860,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3218,23 +2876,18 @@
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
-                                             ecx, edi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
+  GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback,
+                       name, &miss);
 
   __ bind(&miss);
-
   __ DecrementCounter(counters->keyed_load_callback(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(CALLBACKS, name);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -3268,9 +2921,10 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3282,27 +2936,19 @@
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          edx,
-                          eax,
-                          ecx,
-                          ebx,
-                          edi,
-                          name,
-                          &miss);
+  GenerateLoadInterceptor(receiver, holder, &lookup, edx, eax, ecx, ebx, edi,
+                          name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_interceptor(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -3433,7 +3079,8 @@
 
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // ----------- S t a t e -------------
   //  -- eax : argc
   //  -- edi : constructor
@@ -3472,12 +3119,8 @@
   // ebx: initial map
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
   __ shl(ecx, kPointerSizeLog2);
-  __ AllocateInNewSpace(ecx,
-                        edx,
-                        ecx,
-                        no_reg,
-                        &generic_stub_call,
-                        NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(ecx, edx, ecx, no_reg,
+                        &generic_stub_call, NO_ALLOCATION_FLAGS);
 
   // Allocated the JSObject, now initialize the fields and add the heap tag.
   // ebx: initial map
@@ -3508,7 +3151,7 @@
   // edi: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       // Check if the argument assigned to the property is actually passed.
@@ -3560,9 +3203,8 @@
   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Handle<Code> generic_construct_stub =
-      isolate()->builtins()->JSConstructStubGeneric();
-  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+  Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+  __ jmp(code, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode();
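
The stub-cache changes above all follow one handlification pattern: the Try*
entry points that returned a raw MaybeObject* (threading failures and an
undefined sentinel through every step) become GC-safe functions returning a
Handle<Code>, where a null handle is the single "bail out to the generic
path" signal. A minimal stand-alone sketch of that sentinel convention;
Handle, Code and CompileCustomCall below are simplified stand-ins, not the
real V8 declarations:

#include <cassert>
#include <cstdio>

struct Code { const char* kind; };

template <typename T>
class Handle {
 public:
  Handle() : location_(nullptr) {}
  explicit Handle(T* object) : location_(object) {}
  static Handle<T> null() { return Handle<T>(); }
  bool is_null() const { return location_ == nullptr; }
  T* operator->() const { assert(!is_null()); return location_; }
 private:
  T* location_;
};

// Returns a null handle when the custom call generator does not apply,
// mirroring `if (!code.is_null()) return code;` in the diff above.
Handle<Code> CompileCustomCall(bool applicable) {
  static Code custom = {"custom"};
  return applicable ? Handle<Code>(&custom) : Handle<Code>::null();
}

int main() {
  Handle<Code> code = CompileCustomCall(false);
  if (code.is_null()) {
    std::printf("bail out to the regular compiler\n");
  } else {
    std::printf("use %s stub\n", code->kind);
  }
}

Because allocation failures are now handled behind GetCode() and
GenerateMissBranch() via handles, the callers no longer need the
IsFailure() checks deleted throughout this file.
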
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
index 2a7fba7..c8af236 100644
--- a/src/incremental-marking-inl.h
+++ b/src/incremental-marking-inl.h
@@ -37,62 +37,42 @@
 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
                                          Object** slot,
                                          Object* value) {
-  if (IsMarking() && value->IsHeapObject()) {
-    MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
-    if (Marking::IsWhite(value_bit)) {
-      MarkBit obj_bit = Marking::MarkBitFrom(obj);
-      if (Marking::IsBlack(obj_bit)) {
-        BlackToGreyAndUnshift(obj, obj_bit);
-        RestartIfNotMarking();
-      }
-
-      // Object is either grey or white it will be scanned if survives.
-      return false;
+  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
     }
-    return true;
+
+    // Object is either grey or white.  It will be scanned if it survives.
+    return false;
   }
-  return false;
+  return true;
 }
 
 
 void IncrementalMarking::RecordWrite(HeapObject* obj,
                                      Object** slot,
                                      Object* value) {
-  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      // Object is not going to be rescanned we need to record the slot.
-      heap_->mark_compact_collector()->RecordSlot(
-          HeapObject::RawField(obj, 0), slot, value);
-    }
+  if (IsMarking() && value->NonFailureIsHeapObject()) {
+    RecordWriteSlow(obj, slot, value);
   }
 }
 
 
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
+                                                Object** slot,
+                                                Code* value) {
+  if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+}
+
+
 void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
                                              RelocInfo* rinfo,
                                              Object* value) {
-  if (IsMarking() && value->IsHeapObject()) {
-    MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
-    if (Marking::IsWhite(value_bit)) {
-      MarkBit obj_bit = Marking::MarkBitFrom(obj);
-      if (Marking::IsBlack(obj_bit)) {
-        BlackToGreyAndUnshift(obj, obj_bit);
-        RestartIfNotMarking();
-      }
-
-      // Object is either grey or white it will be scanned if survives.
-      return;
-    }
-
-    if (is_compacting_) {
-      MarkBit obj_bit = Marking::MarkBitFrom(obj);
-      if (Marking::IsBlack(obj_bit)) {
-        // Object is not going to be rescanned we need to record the slot.
-        heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
-                                                         Code::cast(value));
-      }
-    }
+  if (IsMarking() && value->NonFailureIsHeapObject()) {
+    RecordWriteIntoCodeSlow(obj, rinfo, value);
   }
 }
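
The rewrite above splits each write-barrier entry point into an inlined fast
path that only performs the cheap IsMarking() / heap-object filter, and an
out-of-line *Slow function (added in incremental-marking.cc below) that does
the actual marking work. A self-contained sketch of that split, with plain
pointers standing in for V8's HeapObject/Object types and a null check
standing in for the NonFailureIsHeapObject() filter:

#include <cstdio>

class Marker {
 public:
  explicit Marker(bool marking) : marking_(marking) {}

  // Hot path: meant to be inlined at every pointer store.
  inline void RecordWrite(void* obj, void** slot, void* value) {
    if (marking_ && value != nullptr) RecordWriteSlow(obj, slot, value);
  }

 private:
  // Cold path: compiled once, out of line.
  void RecordWriteSlow(void* obj, void** slot, void* value) {
    std::printf("slow barrier: obj=%p slot=%p value=%p\n", obj, slot, value);
  }
  bool marking_;
};

int main() {
  int a = 1, b = 2;
  void* slot = nullptr;
  Marker marker(true);
  marker.RecordWrite(&a, &slot, &b);  // Takes the slow path while marking.
}

Keeping the inlined portion down to a single predicate keeps code size small
at the many call sites, while the rarely taken marking work is compiled once.
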
 
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 68b830a..6d2f393 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -60,6 +60,20 @@
 }
 
 
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
+                                         Object** slot,
+                                         Object* value) {
+  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned.  We need to record the slot.
+      heap_->mark_compact_collector()->RecordSlot(
+          HeapObject::RawField(obj, 0), slot, value);
+    }
+  }
+}
+
+
 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                              Object* value,
                                              Isolate* isolate) {
@@ -108,7 +122,7 @@
 }
 
 
-void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
+void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                 Object** slot,
                                                 Code* value) {
   if (BaseRecordWrite(host, slot, value) && is_compacting_) {
@@ -119,6 +133,30 @@
 }
 
 
+void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
+                                                 RelocInfo* rinfo,
+                                                 Object* value) {
+  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+    // Object is either grey or white.  It will be scanned if it survives.
+    return;
+  }
+
+  if (is_compacting_) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned.  We need to record the slot.
+      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
+                                                       Code::cast(value));
+    }
+  }
+}
+
 
 class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
  public:
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index fa7337b..4542fbd 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -59,7 +59,7 @@
 
   inline bool IsStopped() { return state() == STOPPED; }
 
-  inline bool IsMarking() { return state() >= MARKING; }
+  INLINE(bool IsMarking()) { return state() >= MARKING; }
 
   inline bool IsMarkingIncomplete() { return state() == MARKING; }
 
@@ -120,16 +120,23 @@
                                                Object** slot,
                                                Isolate* isolate);
 
-  inline bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value);
-
-
-  inline void RecordWrite(HeapObject* obj, Object** slot, Object* value);
-  inline void RecordWriteIntoCode(HeapObject* obj,
+  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWriteIntoCode(HeapObject* obj,
                                   RelocInfo* rinfo,
-                                  Object* value);
+                                  Object* value));
+  INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
+                                     Object** slot,
+                                     Code* value));
+
+
+  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+  void RecordWriteIntoCodeSlow(HeapObject* obj,
+                               RelocInfo* rinfo,
+                               Object* value);
+  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
   void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
   void RecordCodeTargetPatch(Address pc, HeapObject* value);
-  void RecordWriteOfCodeEntry(JSFunction* host, Object** slot, Code* value);
 
   inline void RecordWrites(HeapObject* obj);
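
The header switches the hot RecordWrite* entry points from plain `inline` to
the INLINE(...) macro, which forces inlining on compilers that support it. A
hedged sketch of what such a macro typically expands to (the real definition
lives in V8's globals.h and is compiler- and build-specific):

#include <cstdio>

#if defined(__GNUC__)
#define INLINE(declarator) inline __attribute__((always_inline)) declarator
#else
#define INLINE(declarator) inline declarator
#endif

class MarkerSketch {
 public:
  INLINE(bool IsMarking()) { return state_ >= 1; }
  void set_state(int state) { state_ = state; }
 private:
  int state_ = 0;
};

int main() {
  MarkerSketch m;
  m.set_state(2);
  std::printf("%d\n", m.IsMarking());  // 1
}

With plain `inline` the compiler may still emit an out-of-line call;
always_inline makes the fast-path/slow-path split above reliable.
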
 
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index d6e6131..0a2c174 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -45,9 +45,7 @@
   }
   isolate->set_save_context(this);
 
-  // If there is no JS frame under the current C frame, use the value 0.
-  JavaScriptFrameIterator it(isolate);
-  js_sp_ = it.done() ? 0 : it.frame()->sp();
+  c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
 }
 
 
diff --git a/src/isolate.h b/src/isolate.h
index 5453bf2..116b802 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1246,8 +1246,8 @@
   SaveContext* prev() { return prev_; }
 
   // Returns true if this save context is below a given JavaScript frame.
-  bool below(JavaScriptFrame* frame) {
-    return (js_sp_ == 0) || (frame->sp() < js_sp_);
+  bool IsBelowFrame(JavaScriptFrame* frame) {
+    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
   }
 
  private:
@@ -1256,7 +1256,7 @@
   Handle<Context> dummy_;
 #endif
   SaveContext* prev_;
-  Address js_sp_;  // The top JS frame's sp when saving context.
+  Address c_entry_fp_;
 };
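
SaveContext now records the C entry frame pointer at construction instead of
walking a JavaScriptFrameIterator, and below() becomes IsBelowFrame() with
the comparison flipped accordingly. An illustrative sketch of the new
predicate, assuming a downward-growing stack (addresses are plain integers
here, not the real V8 Address type):

#include <cstdint>
#include <cstdio>

using Address = uintptr_t;

struct SaveContextSketch {
  Address c_entry_fp_;  // 0 means "no C entry frame recorded".

  // A JS frame entered after this SaveContext was created has a stack
  // pointer numerically below the recorded C entry frame pointer.
  bool IsBelowFrame(Address frame_sp) const {
    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame_sp);
  }
};

int main() {
  SaveContextSketch save = {0x7000};
  std::printf("%d\n", save.IsBelowFrame(0x6f00));  // 1: frame is newer.
  std::printf("%d\n", save.IsBelowFrame(0x7100));  // 0: frame is older.
}

Reading c_entry_fp out of the thread-local top is a constant-time load,
avoiding the construction of a frame iterator every time a context is saved.
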
 
 
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 6107cbf..9b77be1 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -855,38 +855,20 @@
       return HEAP->undefined_value();
     }
     do {
-      ZoneList<Variable*> list(10);
-      outer_scope->CollectUsedVariables(&list);
-      int j = 0;
-      for (int i = 0; i < list.length(); i++) {
-        Variable* var1 = list[i];
-        if (var1->IsContextSlot()) {
-          if (j != i) {
-            list[j] = var1;
-          }
-          j++;
-        }
-      }
+      ZoneList<Variable*> stack_list(outer_scope->StackLocalCount());
+      ZoneList<Variable*> context_list(outer_scope->ContextLocalCount());
+      outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
+      context_list.Sort(&Variable::CompareIndex);
 
-      // Sort it.
-      for (int k = 1; k < j; k++) {
-        int l = k;
-        for (int m = k + 1; m < j; m++) {
-          if (list[l]->index() > list[m]->index()) {
-            l = m;
-          }
-        }
-        list[k] = list[l];
-      }
-      for (int i = 0; i < j; i++) {
+      for (int i = 0; i < context_list.length(); i++) {
         SetElementNonStrict(scope_info_list,
                             scope_info_length,
-                            list[i]->name());
+                            context_list[i]->name());
         scope_info_length++;
         SetElementNonStrict(
             scope_info_list,
             scope_info_length,
-            Handle<Smi>(Smi::FromInt(list[i]->index())));
+            Handle<Smi>(Smi::FromInt(context_list[i]->index())));
         scope_info_length++;
       }
       SetElementNonStrict(scope_info_list,
@@ -1108,7 +1090,7 @@
     ReplaceCodeObject(shared_info->code(), *code);
     Handle<Object> code_scope_info =  compile_info_wrapper.GetCodeScopeInfo();
     if (code_scope_info->IsFixedArray()) {
-      shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
+      shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
     }
   }
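
The replaced block filtered context slots out of CollectUsedVariables() by
hand and then ordered them with a selection-sort loop that assigned
list[k] = list[l] without swapping, silently losing elements; the new code
asks the scope for its stack and context locals directly and sorts with a
comparator. A stand-alone illustration of the sort-by-comparator pattern
(Variable and CompareIndex are simplified stand-ins; the real ZoneList::Sort
takes a three-way int comparator rather than std::sort's boolean predicate):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Variable {
  const char* name;
  int index;
  static bool CompareIndex(const Variable* a, const Variable* b) {
    return a->index < b->index;
  }
};

int main() {
  Variable x = {"x", 2}, y = {"y", 0}, z = {"z", 1};
  std::vector<Variable*> context_list = {&x, &y, &z};
  std::sort(context_list.begin(), context_list.end(),
            &Variable::CompareIndex);
  for (const Variable* v : context_list) {
    std::printf("%s:%d\n", v->name, v->index);  // y:0 z:1 x:2
  }
}
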
 
diff --git a/src/macros.py b/src/macros.py
index a42e83c..bf7119f 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -82,8 +82,6 @@
 const kMaxYear  = 1000000;
 const kMinMonth = -10000000;
 const kMaxMonth = 10000000;
-const kMinDate  = -100000000;
-const kMaxDate  = 100000000;
 
 # Native cache ids.
 const STRING_TO_REGEXP_CACHE_ID = 0;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index b41b033..94e65fa 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1516,8 +1516,8 @@
         if (o->IsExternalString()) {
           heap_->FinalizeExternalString(String::cast(*p));
         }
-        // Set the entry to null_value (as deleted).
-        *p = heap_->null_value();
+        // Set the entry to the_hole_value (as deleted).
+        *p = heap_->the_hole_value();
         pointers_removed_++;
       }
     }
@@ -2124,7 +2124,7 @@
              i += MapCache::kEntrySize) {
           Object* raw_key = map_cache->get(i);
           if (raw_key == heap()->undefined_value() ||
-              raw_key == heap()->null_value()) continue;
+              raw_key == heap()->the_hole_value()) continue;
           STATIC_ASSERT(MapCache::kEntrySize == 2);
           Object* raw_map = map_cache->get(i + 1);
           if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
@@ -2132,8 +2132,8 @@
           } else {
             // Delete useless entries with unmarked maps.
             ASSERT(raw_map->IsMap());
-            map_cache->set_null_unchecked(heap(), i);
-            map_cache->set_null_unchecked(heap(), i + 1);
+            map_cache->set_the_hole(i);
+            map_cache->set_the_hole(i + 1);
           }
         }
         if (used_elements == 0) {
@@ -2320,7 +2320,7 @@
     ObjectHashTable* table = weak_map->unchecked_table();
     for (int i = 0; i < table->Capacity(); i++) {
       if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
-        table->RemoveEntry(i, heap());
+        table->RemoveEntry(i);
       }
     }
     weak_map_obj = weak_map->next();
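
These hunks change the "deleted entry" sentinel in the symbol table, map
cache, and weak-map tables from null_value to the_hole_value, presumably so
that null stays usable as an ordinary key or value. In an open-addressed
table the deletion sentinel (a tombstone) must be distinguishable from
"never used": probing continues past tombstones but stops at truly empty
slots. A toy sketch of that probing rule, with local enum values standing in
for V8's undefined/the-hole markers:

#include <array>
#include <cstdio>

enum Slot { kEmpty, kHole, kUsed };

bool Contains(const std::array<Slot, 4>& table, int start) {
  for (int i = 0; i < 4; i++) {
    Slot s = table[(start + i) % 4];
    if (s == kUsed) return true;    // Found a live entry.
    if (s == kEmpty) return false;  // Empty slot ends the probe chain.
    // kHole: a deleted entry; keep probing.
  }
  return false;
}

int main() {
  std::array<Slot, 4> table = {kHole, kUsed, kEmpty, kEmpty};
  std::printf("%d\n", Contains(table, 0));  // 1: probe skips the hole.
}
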
diff --git a/src/messages.cc b/src/messages.cc
index c70463d..a0793c2 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -126,7 +126,7 @@
       v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
       Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
       v8::MessageCallback callback =
-          FUNCTION_CAST<v8::MessageCallback>(callback_obj->address());
+          FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
       Handle<Object> callback_data(listener.get(1));
       {
         // Do not allow exceptions to propagate.
diff --git a/src/messages.js b/src/messages.js
index e4607ab..e50f295 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -198,6 +198,7 @@
       // RangeError
       "invalid_array_length",         ["Invalid array length"],
       "stack_overflow",               ["Maximum call stack size exceeded"],
+      "invalid_time_value",           ["Invalid time value"],
       // SyntaxError
       "unable_to_parse",              ["Parse error"],
       "invalid_regexp_flags",         ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
@@ -744,12 +745,8 @@
     hasBeenSet = true;
     value = v;
   }
-  var desc = { get: getter,
-               set: setter,
-               enumerable: false,
-               configurable: true };
-  desc = ToPropertyDescriptor(desc);
-  DefineOwnProperty(obj, name, desc, true);
+  %DefineOrRedefineAccessorProperty(obj, name, GETTER, getter, DONT_ENUM);
+  %DefineOrRedefineAccessorProperty(obj, name, SETTER, setter, DONT_ENUM);
 }
 
 function CallSite(receiver, fun, pos) {
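
The new invalid_time_value message pairs with the Date error handling
changes: ECMAScript limits time values to +/-100,000,000 days (8.64e15 ms)
around the epoch, the same bound as the kMinDate/kMaxDate constants removed
from macros.py above, and out-of-range values must behave as NaN. A minimal
range-check sketch under that spec bound (it omits TimeClip's
integer-conversion step):

#include <cmath>
#include <cstdio>

const double kMaxTimeMs = 8.64e15;  // 100,000,000 days in milliseconds.

double TimeClip(double time_ms) {
  if (!std::isfinite(time_ms) || std::fabs(time_ms) > kMaxTimeMs) {
    return std::nan("");  // Invalid time value.
  }
  return time_ms;
}

int main() {
  std::printf("%f\n", TimeClip(8.64e15));      // Still valid.
  std::printf("%f\n", TimeClip(8.64e15 + 1));  // NaN: out of range.
}
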
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 17975fe..98fd57d 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -628,8 +628,7 @@
 
   Label slow, non_function_call;
   // Check that the function is not a smi.
-  __ And(t0, a1, Operand(kSmiTagMask));
-  __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+  __ JumpIfSmi(a1, &non_function_call);
   // Check that the function is a JSFunction.
   __ GetObjectType(a1, a2, a2);
   __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -705,8 +704,7 @@
       // Load the initial map and verify that it is in fact a map.
       // a1: constructor function
       __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-      __ And(t0, a2, Operand(kSmiTagMask));
-      __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+      __ JumpIfSmi(a2, &rt_call);
       __ GetObjectType(a2, a3, t4);
       __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
 
@@ -984,8 +982,7 @@
     // sp[0]: receiver (newly allocated object)
     // sp[1]: constructor function
     // sp[2]: number of arguments (smi-tagged)
-    __ And(t0, v0, Operand(kSmiTagMask));
-    __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+    __ JumpIfSmi(v0, &use_receiver);
 
     // If the type of the result (stored in its map) is less than
     // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
@@ -1176,24 +1173,93 @@
 }
 
 
-// These functions are called from C++ but cannot be used in live code.
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(a0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  }
+
+  // Get the full codegen state from the stack and untag it -> t2.
+  __ lw(t2, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(t2);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ Branch(&with_tos_register,
+            ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
+  __ lw(v0, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+
+  __ Addu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+
 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
 
 
 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
 
 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  // For now, we are relying on the fact that Runtime::NotifyOSR
+  // doesn't do any garbage collection, which allows us to save/restore
+  // the registers without worrying about which of them contain
+  // pointers. This seems a bit fragile.
+  RegList saved_regs =
+      (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
+  __ MultiPush(saved_regs);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
+  __ MultiPop(saved_regs);
+  __ Ret();
 }
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  CpuFeatures::TryForceFeatureScope scope(FPU);
+  if (!CpuFeatures::IsSupported(FPU)) {
+    __ Abort("Unreachable code: Cannot optimize without FPU support.");
+    return;
+  }
+
+  // Lookup the function in the JavaScript frame and push it as an
+  // argument to the on-stack replacement function.
+  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(a0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
+
+  // If the result was -1 it means that we couldn't optimize the
+  // function. Just return and continue in the unoptimized version.
+  __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
+
+  // Untag the AST id and push it on the stack.
+  __ SmiUntag(v0);
+  __ push(v0);
+
+  // Generate the code for doing the frame-to-frame translation using
+  // the deoptimizer infrastructure.
+  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+  generator.Generate();
 }
 
 
@@ -1215,8 +1281,7 @@
   __ sll(at, a0, kPointerSizeLog2);
   __ addu(at, sp, at);
   __ lw(a1, MemOperand(at));
-  __ And(at, a1, Operand(kSmiTagMask));
-  __ Branch(&non_function, eq, at, Operand(zero_reg));
+  __ JumpIfSmi(a1, &non_function);
   __ GetObjectType(a1, a2, a2);
   __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
 
@@ -1395,8 +1460,7 @@
   const int kFunctionOffset =  4 * kPointerSize;
 
   {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
     __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
     __ push(a0);
     __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
@@ -1456,8 +1520,7 @@
     __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
 
     // Compute the receiver in non-strict mode.
-    __ And(t3, a0, Operand(kSmiTagMask));
-    __ Branch(&call_to_object, eq, t3, Operand(zero_reg));
+    __ JumpIfSmi(a0, &call_to_object);
     __ LoadRoot(a1, Heap::kNullValueRootIndex);
     __ Branch(&use_global_receiver, eq, a0, Operand(a1));
     __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
@@ -1530,8 +1593,7 @@
     __ InvokeFunction(a1, actual, CALL_FUNCTION,
                       NullCallWrapper(), CALL_AS_METHOD);
 
-    scope.GenerateLeaveFrame();
-
+    frame_scope.GenerateLeaveFrame();
     __ Ret(USE_DELAY_SLOT);
     __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
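
Generate_NotifyDeoptimizedHelper above dispatches on the full-codegen state
word left on the stack: NO_REGISTERS means only the state slot needs
popping, while TOS_REG means the result register v0 must additionally be
reloaded from the slot beneath it. A host-side C++ model of that epilogue
(the enum values are stand-ins for FullCodeGenerator's state constants):

#include <cstdio>
#include <vector>

enum State { NO_REGISTERS = 0, TOS_REG = 1 };

int NotifyDeoptimizedEpilogue(std::vector<int>* stack) {
  int state = stack->back();       // lw t2, [sp]; SmiUntag(t2).
  if (state == NO_REGISTERS) {
    stack->pop_back();             // Addu sp, sp, 1 slot; Ret.
    return 0;                      // v0 left unchanged (unused).
  }
  // TOS_REG: reload the result spilled below the state word.
  int tos = (*stack)[stack->size() - 2];  // lw v0, [sp + 4].
  stack->pop_back();
  stack->pop_back();               // Addu sp, sp, 2 slots; Ret.
  return tos;
}

int main() {
  std::vector<int> stack = {42, TOS_REG};  // Value, then state on top.
  std::printf("%d\n", NotifyDeoptimizedEpilogue(&stack));  // 42
}
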
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 85e929d..7267779 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1156,8 +1156,7 @@
          (lhs.is(a1) && rhs.is(a0)));
 
   Label lhs_is_smi;
-  __ And(t0, lhs, Operand(kSmiTagMask));
-  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+  __ JumpIfSmi(lhs, &lhs_is_smi);
   // Rhs is a Smi.
   // Check whether the non-smi is a heap number.
   __ GetObjectType(lhs, t4, t4);
@@ -4712,8 +4711,7 @@
   // Check that the third argument is a positive smi less than the subject
   // string length. A negative value will be greater (unsigned comparison).
   __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
-  __ And(at, a0, Operand(kSmiTagMask));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ JumpIfNotSmi(a0, &runtime);
   __ Branch(&runtime, ls, a3, Operand(a0));
 
   // a2: Number of capture registers
@@ -5351,7 +5349,8 @@
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
@@ -5437,7 +5436,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5463,7 +5463,8 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
@@ -6979,84 +6980,6 @@
     // Having undefined at this place means the name is not contained.
     ASSERT_EQ(kSmiTagSize, 1);
     Register tmp = properties;
-    __ sll(tmp, index, 1);
-    __ Addu(tmp, properties, tmp);
-    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
-    ASSERT(!tmp.is(entity_name));
-    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
-    __ Branch(done, eq, entity_name, Operand(tmp));
-
-    if (i != kInlinedProbes - 1) {
-      // Stop if found the property.
-      __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
-
-      // Check if the entry name is not a symbol.
-      __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
-      __ lbu(entity_name,
-             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-      __ And(tmp, entity_name, Operand(kIsSymbolMask));
-      __ Branch(miss, eq, tmp, Operand(zero_reg));
-
-      // Restore the properties.
-      __ lw(properties,
-            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-    }
-  }
-
-  const int spill_mask =
-      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
-       a2.bit() | a1.bit() | a0.bit() | v0.bit());
-
-  __ MultiPush(spill_mask);
-  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ li(a1, Operand(Handle<String>(name)));
-  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
-  __ CallStub(&stub);
-  __ mov(at, v0);
-  __ MultiPop(spill_mask);
-
-  __ Branch(done, eq, at, Operand(zero_reg));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-}
-
-
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register receiver,
-    Register properties,
-    String* name,
-    Register scratch0) {
-// If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // scratch0 points to properties hash.
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = scratch0;
-    // Capacity is smi 2^n.
-    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
-    __ Subu(index, index, Operand(1));
-    __ And(index, index, Operand(
-         Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    // index *= 3.
-    __ sll(at, index, 1);
-    __ Addu(index, index, at);
-
-    Register entity_name = scratch0;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    Register tmp = properties;
-
     __ sll(scratch0, index, 1);
     __ Addu(tmp, properties, scratch0);
     __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
@@ -7090,14 +7013,12 @@
   __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ li(a1, Operand(Handle<String>(name)));
   StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
+  __ CallStub(&stub);
   __ mov(at, v0);
   __ MultiPop(spill_mask);
 
   __ Branch(done, eq, at, Operand(zero_reg));
   __ Branch(miss, ne, at, Operand(zero_reg));
-  return result;
 }
 
 
@@ -7112,6 +7033,11 @@
                                                         Register name,
                                                         Register scratch1,
                                                         Register scratch2) {
+  ASSERT(!elements.is(scratch1));
+  ASSERT(!elements.is(scratch2));
+  ASSERT(!name.is(scratch1));
+  ASSERT(!name.is(scratch2));
+
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
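
Several hunks in this file collapse the two-instruction
`And(t0, reg, Operand(kSmiTagMask))` / `Branch(..., eq, t0, zero_reg)`
sequence into the JumpIfSmi/JumpIfNotSmi macro instructions. Both forms test
the same predicate: V8 tags small integers with a low bit of 0 (kSmiTag == 0,
kSmiTagMask == 1), so masking the low bit identifies a smi. A small sketch
of the tagging scheme:

#include <cstdint>
#include <cstdio>

const intptr_t kSmiTag = 0;
const intptr_t kSmiTagMask = 1;

bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == kSmiTag; }

intptr_t SmiFromInt(int n) { return static_cast<intptr_t>(n) << 1; }

int main() {
  std::printf("%d\n", IsSmi(SmiFromInt(7)));      // 1: tagged integer.
  std::printf("%d\n", IsSmi(SmiFromInt(7) | 1));  // 0: heap-object-like.
}

The macro form also keeps the smi-check idiom in one place instead of
repeating it at every call site.
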
 
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index beb20aa..94ef2af 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -807,17 +807,6 @@
                                      Handle<String> name,
                                      Register scratch0);
 
-  // TODO(kmillikin): Eliminate this function when the stub cache is fully
-  // handlified.
-  MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register receiver,
-      Register properties,
-      String* name,
-      Register scratch0);
-
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
                                      Label* done,
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 92d7edd..bc735df 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -32,24 +32,112 @@
 #include "full-codegen.h"
 #include "safepoint-table.h"
 
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
 namespace v8 {
 namespace internal {
 
 
-const int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 32;
 
 
 int Deoptimizer::patch_size() {
-  const int kCallInstructionSizeInWords = 3;
+  const int kCallInstructionSizeInWords = 4;
   return kCallInstructionSizeInWords * Assembler::kInstrSize;
 }
 
 
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+  // Nothing to do. No new relocation information is written for lazy
+  // deoptimization on MIPS.
+}
+
+
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  UNIMPLEMENTED();
+  HandleScope scope;
+  AssertNoAllocation no_allocation;
+
+  if (!function->IsOptimized()) return;
+
+  // Get the optimized code.
+  Code* code = function->code();
+
+  // Invalidate the relocation information, as it will become invalid by the
+  // code patching below, and is not needed any more.
+  code->InvalidateRelocation();
+
+  // For each return after a safepoint insert an absolute call to the
+  // corresponding deoptimization entry.
+  unsigned last_pc_offset = 0;
+  SafepointTable table(function->code());
+  for (unsigned i = 0; i < table.length(); i++) {
+    unsigned pc_offset = table.GetPcOffset(i);
+    SafepointEntry safepoint_entry = table.GetEntry(i);
+    int deoptimization_index = safepoint_entry.deoptimization_index();
+    int gap_code_size = safepoint_entry.gap_code_size();
+    // Check that we did not shoot past the next safepoint.
+    CHECK(pc_offset >= last_pc_offset);
+#ifdef DEBUG
+    // Destroy the code which is not supposed to be run again.
+    int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
+    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                          instructions);
+    for (int x = 0; x < instructions; x++) {
+      destroyer.masm()->break_(0);
+    }
+#endif
+    last_pc_offset = pc_offset;
+    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+      Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
+          deoptimization_index, Deoptimizer::LAZY);
+      last_pc_offset += gap_code_size;
+      int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
+                                                        RelocInfo::NONE);
+      int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+      ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+      ASSERT(call_size_in_bytes <= patch_size());
+      CodePatcher patcher(code->instruction_start() + last_pc_offset,
+                          call_size_in_words);
+      patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
+      last_pc_offset += call_size_in_bytes;
+    }
+  }
+
+#ifdef DEBUG
+  // Destroy the code which is not supposed to be run again.
+  int instructions =
+      (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
+  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                        instructions);
+  for (int x = 0; x < instructions; x++) {
+    destroyer.masm()->break_(0);
+  }
+#endif
+
+  Isolate* isolate = code->GetIsolate();
+
+  // Add the deoptimizing code to the list.
+  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+  DeoptimizerData* data = isolate->deoptimizer_data();
+  node->set_next(data->deoptimizing_code_list_);
+  data->deoptimizing_code_list_ = node;
+
+  // We might be in the middle of incremental marking with compaction.
+  // Tell collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
+  // Set the code for the function to non-optimized version.
+  function->ReplaceCode(function->shared()->code());
+
+  if (FLAG_trace_deopt) {
+    PrintF("[forced deoptimization: ");
+    function->PrintName();
+    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+    if (FLAG_print_code) {
+      code->PrintLn();
+    }
+#endif
+  }
 }
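
DeoptimizeFunction above walks the safepoint table and, after each
safepoint's gap code, patches in an absolute call to the matching lazy
deoptimization entry; the ASSERTs guarantee each call is a whole number of
MIPS instructions and fits in the reserved patch_size(). A compact model of
that size invariant (the constants mirror the values in this diff; the call
size is a stand-in for MacroAssembler::CallSize):

#include <cassert>
#include <cstdio>

const int kInstrSize = 4;                   // Assembler::kInstrSize.
const int kCallInstructionSizeInWords = 4;  // From patch_size() above.

int patch_size() { return kCallInstructionSizeInWords * kInstrSize; }

int main() {
  int call_size_in_bytes = 16;  // Stand-in for CallSize(entry, mode).
  assert(call_size_in_bytes % kInstrSize == 0);
  assert(call_size_in_bytes <= patch_size());
  std::printf("call of %d words patched in place\n",
              call_size_in_bytes / kInstrSize);
}
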
 
 
@@ -57,7 +145,42 @@
                                         Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
-  UNIMPLEMENTED();
+  const int kInstrSize = Assembler::kInstrSize;
+  // This structure comes from FullCodeGenerator::EmitStackCheck.
+  // The call of the stack guard check has the following form:
+  // sltu at, sp, t0
+  // beq at, zero_reg, ok
+  // lui t9, <stack guard address> upper
+  // ori t9, <stack guard address> lower
+  // jalr t9
+  // nop
+  // ----- pc_after points here
+
+  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
+  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+  patcher.masm()->addiu(at, zero_reg, 1);
+
+  // Replace the stack check address in the load-immediate (lui/ori pair)
+  // with the entry address of the replacement code.
+  ASSERT(reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+      reinterpret_cast<uint32_t>(check_code->entry()));
+  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+                                   replacement_code->entry());
+
+  // We patched the code to the following form:
+  // addiu at, zero_reg, 1
+  // beq at, zero_reg, ok  ;; Not changed
+  // lui t9, <on-stack replacement address> upper
+  // ori t9, <on-stack replacement address> lower
+  // jalr t9  ;; Not changed
+  // nop  ;; Not changed
+  // ----- pc_after points here
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
 }
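
PatchStackCheckCodeAt rewrites two things: the sltu feeding the branch
(replaced by `addiu at, zero_reg, 1`, so the branch is never taken) and the
lui/ori pair that materializes the 32-bit target address. A sketch of how a
32-bit address splits across the lui/ori immediates, roughly the rewrite
that Assembler::set_target_address_at performs on the instruction pair:

#include <cstdint>
#include <cstdio>

struct LuiOriPair { uint16_t upper; uint16_t lower; };

LuiOriPair Split(uint32_t address) {
  return { static_cast<uint16_t>(address >> 16),
           static_cast<uint16_t>(address & 0xFFFF) };
}

uint32_t Reassemble(LuiOriPair pair) {
  return (static_cast<uint32_t>(pair.upper) << 16) | pair.lower;
}

int main() {
  uint32_t entry = 0x12345678;  // Hypothetical replacement_code->entry().
  LuiOriPair pair = Split(entry);
  std::printf("lui t9, 0x%04x ; ori t9, t9, 0x%04x -> 0x%08x\n",
              pair.upper, pair.lower, Reassemble(pair));
}
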
 
 
@@ -65,34 +188,618 @@
                                          Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
-  UNIMPLEMENTED();
+  // Exact opposite of the function above.
+  const int kInstrSize = Assembler::kInstrSize;
+  ASSERT(Assembler::IsAddImmediate(
+      Assembler::instr_at(pc_after - 6 * kInstrSize)));
+  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+  // Restore the sltu instruction so beq can be taken again.
+  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+  patcher.masm()->sltu(at, sp, t0);
+
+  // Replace the on-stack replacement address in the load-immediate (lui/ori
+  // pair) with the entry address of the normal stack-check code.
+  ASSERT(reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+      reinterpret_cast<uint32_t>(replacement_code->entry()));
+  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+                                   check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 4 * kInstrSize, check_code);
+}
+
+
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+  ByteArray* translations = data->TranslationByteArray();
+  int length = data->DeoptCount();
+  for (int i = 0; i < length; i++) {
+    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+      TranslationIterator it(translations, data->TranslationIndex(i)->value());
+      int value = it.Next();
+      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+      // Read the number of frames.
+      value = it.Next();
+      if (value == 1) return i;
+    }
+  }
+  UNREACHABLE();
+  return -1;
 }
 
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
-  UNIMPLEMENTED();
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  unsigned ast_id = data->OsrAstId()->value();
+
+  int bailout_id = LookupBailoutId(data, ast_id);
+  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+  ByteArray* translations = data->TranslationByteArray();
+
+  TranslationIterator iterator(translations, translation_index);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator.Next());
+  ASSERT(Translation::BEGIN == opcode);
+  USE(opcode);
+  int count = iterator.Next();
+  ASSERT(count == 1);
+  USE(count);
+
+  opcode = static_cast<Translation::Opcode>(iterator.Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  unsigned node_id = iterator.Next();
+  USE(node_id);
+  ASSERT(node_id == ast_id);
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+  USE(function);
+  ASSERT(function == function_);
+  unsigned height = iterator.Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  USE(height_in_bytes);
+
+  unsigned fixed_size = ComputeFixedSize(function_);
+  unsigned input_frame_size = input_->GetFrameSize();
+  ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+  unsigned outgoing_size = outgoing_height * kPointerSize;
+  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
+    PrintF(" => node=%u, frame=%d->%d]\n",
+           ast_id,
+           input_frame_size,
+           output_frame_size);
+  }
+
+  // There's only one output frame in the OSR case.
+  output_count_ = 1;
+  output_ = new FrameDescription*[1];
+  output_[0] = new(output_frame_size) FrameDescription(
+      output_frame_size, function_);
+#ifdef DEBUG
+  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
+
+  // Clear the incoming parameters in the optimized frame to avoid
+  // confusing the garbage collector.
+  unsigned output_offset = output_frame_size - kPointerSize;
+  int parameter_count = function_->shared()->formal_parameter_count() + 1;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_[0]->SetFrameSlot(output_offset, 0);
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the incoming parameters. This may overwrite some of the
+  // incoming argument slots we've just cleared.
+  int input_offset = input_frame_size - kPointerSize;
+  bool ok = true;
+  int limit = input_offset - (parameter_count * kPointerSize);
+  while (ok && input_offset > limit) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Set them up explicitly.
+  for (int i = StandardFrameConstants::kCallerPCOffset;
+       ok && i >= StandardFrameConstants::kMarkerOffset;
+       i -= kPointerSize) {
+    uint32_t input_value = input_->GetFrameSlot(input_offset);
+    if (FLAG_trace_osr) {
+      const char* name = "UNKNOWN";
+      switch (i) {
+        case StandardFrameConstants::kCallerPCOffset:
+          name = "caller's pc";
+          break;
+        case StandardFrameConstants::kCallerFPOffset:
+          name = "fp";
+          break;
+        case StandardFrameConstants::kContextOffset:
+          name = "context";
+          break;
+        case StandardFrameConstants::kMarkerOffset:
+          name = "function";
+          break;
+      }
+      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
+             output_offset,
+             input_value,
+             input_offset,
+             name);
+    }
+
+    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+    input_offset -= kPointerSize;
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the rest of the frame.
+  while (ok && input_offset >= 0) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // If translation of any command failed, continue using the input frame.
+  if (!ok) {
+    delete output_[0];
+    output_[0] = input_;
+    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+  } else {
+    // Set up the frame pointer and the context pointer.
+    output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
+    output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
+
+    unsigned pc_offset = data->OsrPcOffset()->value();
+    uint32_t pc = reinterpret_cast<uint32_t>(
+        optimized_code_->entry() + pc_offset);
+    output_[0]->SetPc(pc);
+  }
+  Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
+  output_[0]->SetContinuation(
+      reinterpret_cast<uint32_t>(continuation->entry()));
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+           ok ? "finished" : "aborted",
+           reinterpret_cast<intptr_t>(function));
+    function->PrintName();
+    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+  }
 }
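
The frame-size bookkeeping in DoComputeOsrOutputFrame reduces to a few lines of arithmetic. A standalone sketch with hypothetical inputs (kPointerSize is 4 on 32-bit MIPS; every other value below is made up):

#include <cassert>
#include <cstdio>

int main() {
  const unsigned kPointerSize = 4;         // 32-bit MIPS.
  unsigned height = 3;                     // Frame height (made up).
  unsigned fixed_size = 5 * kPointerSize;  // Assumed ComputeFixedSize() result.
  unsigned stack_slots = 2;                // optimized_code_->stack_slots().
  unsigned outgoing_height = 0;            // OSR never happens mid-call.

  unsigned height_in_bytes = height * kPointerSize;
  unsigned input_frame_size = fixed_size + height_in_bytes;
  unsigned output_frame_size = fixed_size + stack_slots * kPointerSize +
                               outgoing_height * kPointerSize;
  assert(outgoing_height == 0);
  printf("OSR frame: %u -> %u bytes\n", input_frame_size, output_frame_size);
  return 0;
}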
 
 
+// This code is very similar to the ia32/arm versions, but relies on the
+// MIPS register names (fp, sp) and on how the MIPS frame is laid out.
 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
                                  int frame_index) {
-  UNIMPLEMENTED();
-}
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  int node_id = iterator->Next();
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating ");
+    function->PrintName();
+    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+  }
 
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by JavaScriptFrameConstants.
+  unsigned fixed_frame_size = ComputeFixedSize(function);
+  unsigned input_frame_size = input_->GetFrameSize();
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+  output_frame->SetKind(Code::FUNCTION);
+#endif
+
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  ASSERT(frame_index >= 0 && frame_index < output_count_);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address for the bottommost output frame can be computed from
+  // the input frame pointer and the output frame's height.  For all
+  // subsequent output frames, it can be computed from the previous one's
+  // top address and the current frame's size.
+  uint32_t top_address;
+  if (is_bottommost) {
+    // 2 = context and function in the frame.
+    top_address =
+        input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  input_offset -= (parameter_count * kPointerSize);
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Synthesize their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  intptr_t value;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
+  output_frame->SetFp(fp_value);
+  if (is_topmost) {
+    output_frame->SetRegister(fp.code(), fp_value);
+  }
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // For the bottommost output frame the context can be read from the input
+  // frame. For all subsequent output frames it can be taken from the function
+  // as long as we don't inline functions that need local contexts.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = reinterpret_cast<intptr_t>(function->context());
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (is_topmost) {
+    output_frame->SetRegister(cp.code(), value);
+  }
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function);
+  // The function for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Translate the rest of the frame.
+  for (unsigned i = 0; i < height; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  ASSERT(0 == output_offset);
+
+  // Compute this frame's PC, state, and continuation.
+  Code* non_optimized_code = function->shared()->code();
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+
+  FullCodeGenerator::State state =
+      FullCodeGenerator::StateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(state));
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost && bailout_type_ != DEBUGGER) {
+    Builtins* builtins = isolate_->builtins();
+    Code* continuation = (bailout_type_ == EAGER)
+        ? builtins->builtin(Builtins::kNotifyDeoptimized)
+        : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<uint32_t>(continuation->entry()));
+  }
+}
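
As the comments in DoComputeFrame describe, the output frames' top addresses chain together: the bottommost is derived from the input frame pointer, each later one from its predecessor's top. A standalone sketch of that recurrence with fabricated sizes:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kPointerSize = 4;
  uint32_t input_fp = 0x7fff1000;           // Hypothetical input fp.
  uint32_t fixed_size = 6 * kPointerSize;   // Assumed ComputeFixedSize() value.
  uint32_t heights_in_bytes[] = { 12, 8 };  // Locals of two output frames.

  uint32_t prev_top = 0;
  for (int i = 0; i < 2; i++) {
    uint32_t frame_size = fixed_size + heights_in_bytes[i];
    // Bottommost: skip the context and function slots plus the locals;
    // all later frames sit directly below the previous frame's top.
    uint32_t top = (i == 0)
        ? input_fp - 2 * kPointerSize - heights_in_bytes[i]
        : prev_top - frame_size;
    printf("frame %d: size=%u top=0x%08x\n", i, frame_size, top);
    prev_top = top;
  }
  return 0;
}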
 
 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  UNIMPLEMENTED();
+  // Set the register values. The values are not important as there are no
+  // callee-saved registers in JavaScript frames, so all registers are
+  // spilled. Registers fp and sp are set to the correct values, though.
+
+  for (int i = 0; i < Register::kNumRegisters; i++) {
+    input_->SetRegister(i, i * 4);
+  }
+  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+    input_->SetDoubleRegister(i, 0.0);
+  }
+
+  // Fill the frame content from the actual data on the frame.
+  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+  }
 }
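
The copy loop at the end of FillInputFrame mirrors the live stack into the FrameDescription one 32-bit word at a time. A trivial standalone sketch of that copy (the stack contents here are fabricated):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const unsigned kPointerSize = 4;
  uint32_t stack[4] = { 0x11, 0x22, 0x33, 0x44 };  // Stands in for tos.
  uint32_t slots[4];                               // Frame content destination.
  for (unsigned i = 0; i < sizeof(stack); i += kPointerSize) {
    // Analogous to input_->SetFrameSlot(i, Memory::uint32_at(tos + i)).
    memcpy(&slots[i / kPointerSize],
           reinterpret_cast<const char*>(stack) + i, kPointerSize);
  }
  printf("0x%x\n", slots[3]);  // Prints 0x44.
  return 0;
}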
 
 
+#define __ masm()->
+
+
+// This code tries to stay close to the ia32 code so that any changes can be
+// ported easily.
 void Deoptimizer::EntryGenerator::Generate() {
-  UNIMPLEMENTED();
+  GeneratePrologue();
+
+  Isolate* isolate = masm()->isolate();
+
+  CpuFeatures::Scope scope(FPU);
+  // Unlike on ARM, we don't save all the registers, just the useful ones.
+  // For the rest, there are gaps on the stack, so the offsets remain the same.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+  RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+  const int kDoubleRegsSize =
+      kDoubleSize * FPURegister::kNumAllocatableRegisters;
+
+  // Save all FPU registers before messing with them.
+  __ Subu(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+    FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ sdc1(fpu_reg, MemOperand(sp, offset));
+  }
+
+  // Push saved_regs (needed to populate FrameDescription::registers_).
+  // Leave gaps for other registers.
+  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
+  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+    if ((saved_regs & (1 << i)) != 0) {
+      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
+    }
+  }
+
+  const int kSavedRegistersAreaSize =
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+  // Get the bailout id from the stack.
+  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object, if possible (a3)
+  // (this is the return address for lazy deoptimization), and compute the
+  // fp-to-sp delta in register t0.
+  if (type() == EAGER) {
+    __ mov(a3, zero_reg);
+    // Correct one word for bailout id.
+    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else if (type() == OSR) {
+    __ mov(a3, ra);
+    // Correct one word for bailout id.
+    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ mov(a3, ra);
+    // Correct two words for bailout id and return address.
+    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+
+  __ Subu(t0, fp, t0);
+
+  // Allocate a new deoptimizer object.
+  // Pass four arguments in a0 to a3 and the fifth and sixth on the stack.
+  __ PrepareCallCFunction(6, t1);
+  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ li(a1, Operand(type()));  // bailout type,
+  // a2: bailout id already loaded.
+  // a3: code address or 0 already loaded.
+  __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
+  __ li(t1, Operand(ExternalReference::isolate_address()));
+  __ sw(t1, CFunctionArgumentOperand(6));  // Isolate.
+  // Call Deoptimizer::New().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
+
+  // Preserve "deoptimizer" object in register v0 and get the input
+  // frame descriptor pointer to a1 (deoptimizer->input_);
+  // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+  __ mov(a0, v0);
+  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+  // Copy core registers into FrameDescription::registers_[kNumRegisters].
+  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((saved_regs & (1 << i)) != 0) {
+      __ lw(a2, MemOperand(sp, i * kPointerSize));
+      __ sw(a2, MemOperand(a1, offset));
+    } else if (FLAG_debug_code) {
+      __ li(a2, kDebugZapValue);
+      __ sw(a2, MemOperand(a1, offset));
+    }
+  }
+
+  // Copy FPU registers to
+  // double_registers_[DoubleRegister::kNumAllocatableRegisters].
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ ldc1(f0, MemOperand(sp, src_offset));
+    __ sdc1(f0, MemOperand(a1, dst_offset));
+  }
+
+  // Remove the bailout id, the return address (if any), and the saved
+  // registers from the stack.
+  if (type() == EAGER || type() == OSR) {
+    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+
+  // Compute a pointer to the unwinding limit in register a2; that is
+  // the first stack slot not part of the input frame.
+  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+  __ Addu(a2, a2, sp);
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  __ bind(&pop_loop);
+  __ pop(t0);
+  __ sw(t0, MemOperand(a3, 0));
+  __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
+  __ addiu(a3, a3, sizeof(uint32_t));  // In delay slot.
+
+  // Compute the output frame in the deoptimizer.
+  __ push(a0);  // Preserve deoptimizer object across call.
+  // a0: deoptimizer object; a1: scratch.
+  __ PrepareCallCFunction(1, a1);
+  // Call Deoptimizer::ComputeOutputFrames().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 1);
+  }
+  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
+
+  // Replace the current (input) frame with the output frames.
+  Label outer_push_loop, inner_push_loop;
+  // Outer loop state: a0 = current "FrameDescription** output_",
+  // a1 = one past the last FrameDescription**.
+  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+  __ lw(a0, MemOperand(a0, Deoptimizer::output_offset()));  // a0 is output_.
+  __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
+  __ addu(a1, a0, a1);  // a1 = one past the last FrameDescription**.
+  __ bind(&outer_push_loop);
+  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+  __ lw(a2, MemOperand(a0, 0));  // output_[ix]
+  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+  __ bind(&inner_push_loop);
+  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
+  __ Addu(t2, a2, Operand(a3));
+  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
+  __ push(t3);
+  __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+  __ Addu(a0, a0, Operand(kPointerSize));
+  __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+
+  // Push state, pc, and continuation from the last output frame.
+  if (type() != OSR) {
+    __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
+    __ push(t2);
+  }
+
+  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
+  __ push(t2);
+  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
+  __ push(t2);
+
+  // Technically, restoring 'at' should work unless zero_reg is also
+  // restored, but it's safer to check for this.
+  ASSERT(!(at.bit() & restored_regs));
+  // Restore the registers from the last output frame.
+  __ mov(at, a2);
+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((restored_regs & (1 << i)) != 0) {
+      __ lw(ToRegister(i), MemOperand(at, offset));
+    }
+  }
+
+  // Set up the roots register.
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(isolate);
+  __ li(roots, Operand(roots_array_start));
+
+  __ pop(at);  // Get continuation, leave pc on stack.
+  __ pop(ra);
+  __ Jump(at);
+  __ stop("Unreachable.");
 }
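
One detail of EntryGenerator::Generate worth spelling out is the fp-to-sp delta passed to Deoptimizer::New(): the distance from the optimized frame's fp down to where sp stood before the entry code pushed its save area. A standalone sketch with hypothetical register values and an assumed FPU register count:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kPointerSize = 4, kDoubleSize = 8;
  const int kNumberOfRegisters = 32;  // MIPS general-purpose registers.
  const int kNumAllocatableFPU = 14;  // Assumed for illustration only.

  uint32_t saved_area = kNumberOfRegisters * kPointerSize +
                        kNumAllocatableFPU * kDoubleSize;
  uint32_t fp = 0x7fff1000, sp = 0x7fff0e00;  // Hypothetical register values.
  bool lazy = true;  // Lazy deopt skips the bailout id and a return address;
                     // eager and OSR skip only the bailout id.
  uint32_t skipped = (lazy ? 2 : 1) * kPointerSize;
  printf("fp-to-sp delta: %u bytes\n", fp - (sp + saved_area + skipped));
  return 0;
}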
 
 
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
-  UNIMPLEMENTED();
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+
+  // Create a sequence of deoptimization entries. Note that any
+  // registers may still be live.
+
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    if (type() != EAGER) {
+      // Emulate an ia32-like call by pushing the return address to the stack.
+      __ push(ra);
+    }
+    __ li(at, Operand(i));
+    __ push(at);
+    __ Branch(&done);
+
+    // Pad the rest of the code.
+    while (table_entry_size_ > (masm()->pc_offset() - start)) {
+      __ nop();
+    }
+
+    ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start);
+  }
+  __ bind(&done);
 }
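
Because each entry emitted by GeneratePrologue is padded to table_entry_size_, a bailout id maps to its table entry (and back) by constant-time arithmetic. A sketch of that mapping, with an assumed entry size (the real value is target-specific):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kTableEntrySize = 32;  // Assumed; illustrative only.
  uint32_t base = 0x40001000;           // Hypothetical table start address.
  for (uint32_t id = 0; id < 3; id++) {
    uint32_t entry = base + id * kTableEntrySize;
    // The inverse computation recovers the id from an entry address.
    printf("id %u <-> entry 0x%08x (round trip: %u)\n",
           id, entry, (entry - base) / kTableEntrySize);
  }
  return 0;
}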
 
+#undef __
+
 
 } }  // namespace v8::internal
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 2f989bc..c5b69cc 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -55,11 +55,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
@@ -410,7 +405,7 @@
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -433,7 +428,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -468,7 +463,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -527,7 +522,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -594,7 +589,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -697,7 +692,7 @@
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -708,13 +703,7 @@
 
   Label skip;
   if (should_normalize) __ Branch(&skip);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ LoadRoot(t0, Heap::kTrueValueRootIndex);
     Split(eq, a0, Operand(t0), if_true, if_false, NULL);
@@ -731,8 +720,8 @@
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
-  bool binding_needs_init =
-      mode == CONST || mode == CONST_HARMONY || mode == LET;
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
       ++(*global_count);
@@ -1748,7 +1737,7 @@
   __ li(a2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name a0 and a2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1757,7 +1746,7 @@
   __ mov(a0, result_register());
   // Call keyed load IC. It has arguments key and receiver in a0 and a1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -2225,8 +2214,7 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
@@ -2246,9 +2234,7 @@
   __ li(a1, Operand(Smi::FromInt(strict_mode)));
   __ push(a1);
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 }
 
 
@@ -2282,28 +2268,11 @@
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly
-      // in generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(v0);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
       __ push(a1);
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in v0 (function) and
       // v1 (receiver). Touch up the stack with the right values.
@@ -2426,7 +2395,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2438,7 +2408,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ And(t0, v0, Operand(kSmiTagMask));
   Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
 
@@ -2446,7 +2416,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2458,7 +2429,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
 
@@ -2466,7 +2437,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2488,7 +2460,7 @@
   __ Branch(if_false, ne, at, Operand(zero_reg));
   __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
   __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
         if_true, if_false, fall_through);
 
@@ -2496,7 +2468,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2510,7 +2483,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
         if_true, if_false, fall_through);
 
@@ -2518,7 +2491,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2534,7 +2508,7 @@
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
   __ And(at, a1, Operand(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2542,8 +2516,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2619,12 +2593,13 @@
   __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2638,7 +2613,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a2);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
   __ Branch(if_false);
 
@@ -2646,7 +2621,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2660,7 +2636,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a1, Operand(JS_ARRAY_TYPE),
         if_true, if_false, fall_through);
 
@@ -2668,7 +2644,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2682,15 +2659,15 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2712,7 +2689,7 @@
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
   __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
         if_true, if_false, fall_through);
 
@@ -2720,7 +2697,8 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2735,14 +2713,15 @@
                          &if_true, &if_false, &fall_through);
 
   __ pop(a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in a1 and the formal
@@ -2756,9 +2735,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label exit;
   // Get the number of formal parameters.
   __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2778,7 +2756,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2837,7 +2816,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2845,6 +2824,7 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2858,9 +2838,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
 
@@ -2911,9 +2890,10 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2923,9 +2903,10 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2936,7 +2917,8 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2955,8 +2937,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2966,7 +2949,8 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
@@ -2994,7 +2978,8 @@
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -3006,7 +2991,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3024,7 +3010,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -3072,7 +3059,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -3122,9 +3110,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3134,7 +3122,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3146,10 +3135,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3158,10 +3148,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3170,10 +3161,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3182,8 +3174,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3191,7 +3184,8 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3210,8 +3204,9 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3221,7 +3216,8 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3318,7 +3314,8 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3371,7 +3368,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = v0;
@@ -3387,8 +3385,7 @@
   __ Branch(&ok, eq, left, Operand(right));
   // Fail if either is a non-HeapObject.
   __ And(tmp, left, Operand(right));
-  __ And(at, tmp, Operand(kSmiTagMask));
-  __ Branch(&fail, eq, at, Operand(zero_reg));
+  __ JumpIfSmi(tmp, &fail);
   __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
   __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
   __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
@@ -3408,7 +3405,8 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
@@ -3421,14 +3419,15 @@
   __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
   __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3443,12 +3442,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
-
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
@@ -3784,18 +3783,35 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+        if (context()->IsStackValue()) __ push(v0);
+        __ jmp(&done);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+        if (context()->IsStackValue()) __ push(v0);
+        __ bind(&done);
       }
       break;
     }
@@ -4057,11 +4073,12 @@
     context()->Plug(v0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Expression* sub_expr,
                                                  Handle<String> check) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -4071,9 +4088,9 @@
                          &if_true, &if_false, &fall_through);
 
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(v0, if_true);
@@ -4159,7 +4176,7 @@
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(t0, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
       break;
@@ -4168,7 +4185,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       // The stub returns 0 for true.
       Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
       break;
@@ -4216,7 +4233,7 @@
       Handle<Code> ic = CompareIC::GetUninitialized(op);
       __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
     }
   }
@@ -4238,7 +4255,7 @@
                          &if_true, &if_false, &fall_through);
 
   VisitForAccumulatorValue(sub_expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Heap::RootListIndex nil_value = nil == kNullValue ?
       Heap::kNullValueRootIndex :
       Heap::kUndefinedValueRootIndex;
@@ -4253,8 +4270,7 @@
     __ Branch(if_true, eq, a0, Operand(a1));
     __ LoadRoot(a1, other_nil_value);
     __ Branch(if_true, eq, a0, Operand(a1));
-    __ And(at, a0, Operand(kSmiTagMask));
-    __ Branch(if_false, eq, at, Operand(zero_reg));
+    __ JumpIfSmi(a0, if_false);
     // It can be an undetectable object.
     __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
     __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index ca6383c..a8b3fa3 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -527,8 +527,7 @@
   if (id == IC::kCallIC_Miss) {
     Label invoke, global;
     __ lw(a2, MemOperand(sp, argc * kPointerSize));
-    __ andi(t0, a2, kSmiTagMask);
-    __ Branch(&invoke, eq, t0, Operand(zero_reg));
+    __ JumpIfSmi(a2, &invoke);
     __ GetObjectType(a2, a3, a3);
     __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
     __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
new file mode 100644
index 0000000..e640b53
--- /dev/null
+++ b/src/mips/lithium-codegen-mips.cc
@@ -0,0 +1,4628 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-codegen-mips.h"
+#include "mips/lithium-gap-resolver-mips.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     int deoptimization_index)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deoptimization_index_(deoptimization_index) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void BeforeCall(int call_size) const {
+    ASSERT(call_size >= 0);
+    // Ensure that we have enough space after the previous safepoint position
+    // for the code that gets patched in there (the lazy deoptimization patch).
+    int call_end = codegen_->masm()->pc_offset() + call_size;
+    int prev_jump_end =
+        codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
+    if (call_end < prev_jump_end) {
+      int padding_size = prev_jump_end - call_end;
+      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+      while (padding_size > 0) {
+        codegen_->masm()->nop();
+        padding_size -= Assembler::kInstrSize;
+      }
+    }
+  }
+
+  virtual void AfterCall() const {
+    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  int deoptimization_index_;
+};
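
BeforeCall's padding rule keeps the upcoming call's end past the previous safepoint's patchable region. Reduced to plain integers, it looks like this (all positions below are fabricated):

#include <cstdio>

int main() {
  const int kInstrSize = 4;            // MIPS instruction size.
  int pc_offset = 100, call_size = 8;  // Hypothetical code positions.
  int last_safepoint_end = 96;
  int patch_size = 16;  // Stand-in for Deoptimizer::patch_size().

  int call_end = pc_offset + call_size;
  int prev_jump_end = last_safepoint_end + patch_size;
  int padding = (call_end < prev_jump_end) ? prev_jump_end - call_end : 0;
  printf("emit %d nops\n", padding / kInstrSize);  // Prints "emit 1 nops".
  return 0;
}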
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  HPhase phase("Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+  CpuFeatures::Scope scope(FPU);
+
+  CodeStub::GenerateFPStubs();
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
+    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack-allocated buffer goes out of scope.
+  size_t length = builder.position();
+  Vector<char> copy = Vector<char>::New(length + 1);
+  memcpy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop_at");
+  }
+#endif
+
+  // a1: Callee's JS function.
+  // cp: Callee's context.
+  // fp: Caller's frame pointer.
+  // ra: Caller's pc.
+
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). t1 is zero for method calls and non-zero for
+  // function calls.
+  if (info_->is_strict_mode() || info_->is_native()) {
+    Label ok;
+    __ Branch(&ok, eq, t1, Operand(zero_reg));
+
+    int receiver_offset = scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ sw(a2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
+  __ Push(ra, fp, cp, a1);
+  __ Addu(fp, sp, Operand(2 * kPointerSize));  // Adj. FP to point to saved FP.
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ li(a0, Operand(slots));
+      __ li(a2, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ push(a2);
+      __ Subu(a0, a0, 1);
+      __ Branch(&loop, ne, a0, Operand(zero_reg));
+    } else {
+      __ Subu(sp, sp, Operand(slots * kPointerSize));
+    }
+  }
+
+  // Possibly allocate a local context.
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment(";;; Allocate local context");
+    // Argument to NewContext is the function, which is in a1.
+    __ push(a1);
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(heap_slots);
+      __ CallStub(&stub);
+    } else {
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+    // Context is returned in both v0 and cp.  It replaces the context
+    // passed to us.  It's saved on the stack and kept live in cp.
+    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ lw(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextOperand(cp, var->index());
+        __ sw(a0, target);
+        // Update the write barrier. This clobbers a3 and a0.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  // Trace the call.
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+  ASSERT(is_generating());
+  bool emit_instructions = true;
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+    if (instr->IsLabel()) {
+      LLabel* label = LLabel::cast(instr);
+      emit_instructions = !label->HasReplacement();
+    }
+
+    if (emit_instructions) {
+      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      instr->CompileToNative(this);
+    }
+  }
+  return !is_aborted();
+}
+
+
+LInstruction* LCodeGen::GetNextInstruction() {
+  if (current_instruction_ < instructions_->length() - 1) {
+    return instructions_->at(current_instruction_ + 1);
+  } else {
+    return NULL;
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+      __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
+      code->Generate();
+      __ jmp(code->exit());
+    }
+
+    // Pad code to ensure that the last piece of deferred code has
+    // room for lazy bailout: Deoptimizer::patch_size() bytes of code
+    // must follow the last safepoint.
+    while ((masm()->pc_offset() - LastSafepointEnd())
+           < Deoptimizer::patch_size()) {
+      __ nop();
+    }
+  }
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+  // TODO(plind): not clear that this will have an advantage for MIPS.
+  // Skipping it for now. Raised issue #100 for this.
+  Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
+  return false;
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  ASSERT(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  ASSERT(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    __ li(scratch, ToOperand(op));
+    return scratch;
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    __ lw(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                                FloatRegister flt_scratch,
+                                                DoubleRegister dbl_scratch) {
+  if (op->IsDoubleRegister()) {
+    return ToDoubleRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
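+      // Load the int32 into a GPR, move it to the FPU, and convert it
+      // to double.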
+      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
+      __ mtc1(at, flt_scratch);
+      __ cvt_d_w(dbl_scratch, flt_scratch);
+      return dbl_scratch;
+    } else if (r.IsDouble()) {
+      Abort("unsupported double immediate");
+    } else if (r.IsTagged()) {
+      Abort("unsupported tagged immediate");
+    }
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    MemOperand mem_op = ToMemOperand(op);
+    __ ldc1(dbl_scratch, mem_op);
+    return dbl_scratch;
+  }
+  UNREACHABLE();
+  return dbl_scratch;
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+      value->Number());
+  return static_cast<int32_t>(value->Number());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  return value->Number();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      return Operand(static_cast<int32_t>(literal->Number()));
+    } else if (r.IsDouble()) {
+      Abort("ToOperand Unsupported double immediate.");
+    }
+    ASSERT(r.IsTagged());
+    return Operand(literal);
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort("ToOperand IsDoubleRegister unimplemented");
+    return Operand(0);
+  }
+  // Stack slots are not implemented; use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  ASSERT(!op->IsRegister());
+  ASSERT(!op->IsDoubleRegister());
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
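+    // With kPointerSize == 4, spill slot 0 is thus at fp - 12, just
+    // below the saved context and function slots.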
+    return MemOperand(fp, -(index + 3) * kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address.
+    return MemOperand(fp, -(index - 1) * kPointerSize);
+  }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  ASSERT(op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, context,
+    // and the first word of the double in the fixed part of the frame.
+    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address and the first word of
+    // the double.
+    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
+
+  WriteTranslation(environment->outer(), translation);
+  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  translation->BeginFrame(environment->ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (environment->spilled_registers() != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          environment->spilled_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(translation,
+                         environment->spilled_registers()[value->index()],
+                         environment->HasTaggedValueAt(i));
+      } else if (
+          value->IsDoubleRegister() &&
+          environment->spilled_double_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(
+            translation,
+            environment->spilled_double_registers()[value->index()],
+            false);
+      }
+    }
+
+    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+  }
+}
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+                                LOperand* op,
+                                bool is_tagged) {
+  if (op == NULL) {
+    // TODO(twuerthinger): Introduce marker operands to indicate that this value
+    // is not present and must be reconstructed from the deoptimizer. Currently
+    // this is only used for the arguments object.
+    translation->StoreArgumentsObject();
+  } else if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = GetStackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(literal);
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  __ Call(code, mode);
+  RegisterLazyDeoptimization(instr, safepoint_mode);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  ASSERT(pointers != NULL);
+  RecordPosition(pointers->position());
+
+  __ CallRuntime(function, num_arguments);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+                                          SafepointMode safepoint_mode) {
+  // Create the environment to bail out to. If the call has side effects,
+  // execution has to continue after the call; otherwise execution can
+  // resume from a previous bailout point, repeating the call.
+  LEnvironment* deoptimization_environment;
+  if (instr->HasDeoptimizationEnvironment()) {
+    deoptimization_environment = instr->deoptimization_environment();
+  } else {
+    deoptimization_environment = instr->environment();
+  }
+
+  RegisterEnvironmentForDeoptimization(deoptimization_environment);
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(),
+                    deoptimization_environment->deoptimization_index());
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(),
+        0,
+        deoptimization_environment->deoptimization_index());
+  }
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+    }
+    Translation translation(&translations_, frame_count);
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    environment->Register(deoptimization_index, translation.index());
+    deoptimizations_.Add(environment);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+                            LEnvironment* environment,
+                            Register src1,
+                            const Operand& src2) {
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
+  if (entry == NULL) {
+    Abort("bailout was not prepared");
+    return;
+  }
+
+  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.
+
+  if (FLAG_deopt_every_n_times == 1 &&
+      info_->shared_info()->opt_count() == id) {
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    return;
+  }
+
+  if (FLAG_trap_on_deopt) {
+    Label skip;
+    if (cc != al) {
+      __ Branch(&skip, NegateCondition(cc), src1, src2);
+    }
+    __ stop("trap_on_deopt");
+    __ bind(&skip);
+  }
+
+  if (cc == al) {
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+  } else {
+    // TODO(plind): The ARM port is a little different here, due to its
+    // deopt jump table, which is not used on MIPS yet.
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+  }
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  ASSERT(FLAG_deopt);
+  Handle<DeoptimizationInputData> data =
+      factory()->NewDeoptimizationInputData(length, TENURED);
+
+  Handle<ByteArray> translations = translations_.CreateByteArray();
+  data->SetTranslationByteArray(*translations);
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+  for (int i = 0; i < deoptimization_literals_.length(); i++) {
+    literals->set(i, *deoptimization_literals_[i]);
+  }
+  data->SetLiteralArray(*literals);
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, Smi::FromInt(env->ast_id()));
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal);
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    int deoptimization_index) {
+  ASSERT(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+      kind, arguments, deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  if (kind & Safepoint::kWithRegisters) {
+    // Register cp always contains a pointer to the context.
+    safepoint.DefinePointerRegister(cp);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+}
+
+
+void LCodeGen::RecordSafepoint(int deoptimization_index) {
+  LPointerMap empty_pointers(RelocInfo::kNoPosition);
+  RecordSafepoint(&empty_pointers, deoptimization_index);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            int deoptimization_index) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+      deoptimization_index);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+    LPointerMap* pointers,
+    int arguments,
+    int deoptimization_index) {
+  RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
+      deoptimization_index);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  if (label->is_loop_header()) {
+    Comment(";;; B%d - LOOP entry", label->block_id());
+  } else {
+    Comment(";;; B%d", label->block_id());
+  }
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+
+  LInstruction* next = GetNextInstruction();
+  if (next != NULL && next->IsLazyBailout()) {
+    int pc = masm()->pc_offset();
+    safepoints_.SetPcAfterGap(pc);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpConstructResult: {
+      RegExpConstructResultStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::NumberToString: {
+      NumberToStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringAdd: {
+      StringAddStub stub(NO_STRING_ADD_FLAGS);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCompare: {
+      StringCompareStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::TranscendentalCache: {
+      __ lw(a0, MemOperand(sp, 0));
+      TranscendentalCacheStub stub(instr->transcendental_type(),
+                                   TranscendentalCacheStub::TAGGED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  Register scratch = scratch0();
+  const Register left = ToRegister(instr->InputAt(0));
+  const Register result = ToRegister(instr->result());
+
+  // p2constant holds the absolute value of the right-hand side if it is
+  // a power-of-two constant. In other cases it is 0.
+  int32_t p2constant = 0;
+
+  if (instr->InputAt(1)->IsConstantOperand()) {
+    p2constant = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
+    // Result always takes the sign of the dividend (left).
+    p2constant = abs(p2constant);
+    // The mask-based fast path below is only valid for powers of two.
+    // Checking the low bit alone is not enough (e.g. 6 is even but not
+    // a power of two), so reject every other value.
+    if (!IsPowerOf2(p2constant)) {
+      p2constant = 0;
+    }
+  }
+
+  // div runs in the background while we check for special cases.
+  Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+  __ div(left, right);
+
+  // Check for x % 0.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+  }
+
+  Label skip_div, do_div;
+  if (p2constant != 0) {
+    // Fall back to the result of the div instruction if we could have sign
+    // problems.
+    __ Branch(&do_div, lt, left, Operand(zero_reg));
+    // Modulo by masking.
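+    // For left >= 0 and a power-of-two p, left % p == left & (p - 1),
+    // e.g. 13 % 8 == 13 & 7 == 5.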
+    __ And(scratch, left, p2constant - 1);
+    __ Branch(&skip_div);
+  }
+
+  __ bind(&do_div);
+  __ mfhi(scratch);
+  __ bind(&skip_div);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Result always takes the sign of the dividend (left).
+    Label done;
+    __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
+    __ mov(result, scratch);
+    DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+    __ bind(&done);
+  } else {
+    __ Move(result, scratch);
+  }
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+  const Register left = ToRegister(instr->InputAt(0));
+  const Register right = ToRegister(instr->InputAt(1));
+  const Register result = ToRegister(instr->result());
+
+  // On MIPS div is asynchronous - it will run in the background while we
+  // check for special cases.
+  __ div(left, right);
+
+  // Check for x / 0.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
+    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    Label left_not_min_int;
+    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
+    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+    __ bind(&left_not_min_int);
+  }
+
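+  // div leaves the quotient in LO and the remainder in HI. Deoptimize if
+  // the remainder is non-zero, since the result would not be an int32.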
+  __ mfhi(result);
+  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+  __ mflo(result);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->InputAt(0));
+  LOperand* right_op = instr->InputAt(1);
+
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+  if (right_op->IsConstantOperand() && !can_overflow) {
+    // Use optimized code for specific constants.
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result should
+      // be -0, so deoptimize.
+      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+    }
+
+    switch (constant) {
+      case -1:
+        __ Subu(result, zero_reg, left);
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required, otherwise return 0.
+          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+        }
+        __ mov(result, zero_reg);
+        break;
+      case 1:
+        // Nothing to do.
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
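+        // e.g. x * 8 == x << 3, x * 9 == (x << 3) + x,
+        // and x * 7 == (x << 3) - x.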
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
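+        // mask is 0 for non-negative constants and -1 for negative ones,
+        // so (constant + mask) ^ mask is a branch-free absolute value,
+        // e.g. (-5 + -1) ^ -1 == ~(-6) == 5.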
+
+        if (IsPowerOf2(constant_abs) ||
+            IsPowerOf2(constant_abs - 1) ||
+            IsPowerOf2(constant_abs + 1)) {
+          if (IsPowerOf2(constant_abs)) {
+            int32_t shift = WhichPowerOf2(constant_abs);
+            __ sll(result, left, shift);
+          } else if (IsPowerOf2(constant_abs - 1)) {
+            int32_t shift = WhichPowerOf2(constant_abs - 1);
+            __ sll(result, left, shift);
+            __ Addu(result, result, left);
+          } else if (IsPowerOf2(constant_abs + 1)) {
+            int32_t shift = WhichPowerOf2(constant_abs + 1);
+            __ sll(result, left, shift);
+            __ Subu(result, result, left);
+          }
+
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0)  {
+            __ Subu(result, zero_reg, result);
+          }
+
+        } else {
+          // Generate standard code.
+          __ li(at, constant);
+          __ mul(result, left, at);
+        }
+    }
+
+  } else {
+    Register right = EmitLoadRegister(right_op, scratch);
+    if (bailout_on_minus_zero) {
+      __ Or(ToRegister(instr->TempAt(0)), left, right);
+    }
+
+    if (can_overflow) {
+      // hi:lo = left * right.
+      __ mult(left, right);
+      __ mfhi(scratch);
+      __ mflo(result);
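+      // The product fits in 32 bits iff the high word equals the sign
+      // extension of the low word; deoptimize otherwise.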
+      __ sra(at, result, 31);
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+    } else {
+      __ mul(result, left, right);
+    }
+
+    if (bailout_on_minus_zero) {
+      // Bail out if the result is supposed to be negative zero.
+      Label done;
+      __ Branch(&done, ne, result, Operand(zero_reg));
+      DeoptimizeIf(lt,
+                   instr->environment(),
+                   ToRegister(instr->TempAt(0)),
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->InputAt(0);
+  LOperand* right_op = instr->InputAt(1);
+  ASSERT(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
+
+  if (right_op->IsStackSlot() || right_op->IsArgument()) {
+    right = Operand(EmitLoadRegister(right_op, at));
+  } else {
+    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
+  }
+
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ And(result, left, right);
+      break;
+    case Token::BIT_OR:
+      __ Or(result, left, right);
+      break;
+    case Token::BIT_XOR:
+      __ Xor(result, left, right);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->InputAt(1);
+  Register left = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  if (right_op->IsRegister()) {
+    // No need to mask the right operand on MIPS; masking is built into
+    // the variable shift instructions.
+    switch (instr->op()) {
+      case Token::SAR:
+        __ srav(result, left, ToRegister(right_op));
+        break;
+      case Token::SHR:
+        __ srlv(result, left, ToRegister(right_op));
+        if (instr->can_deopt()) {
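+          // A zero shift amount leaves the sign bit intact, and a result
+          // with the sign bit set does not fit in an untagged int32.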
+          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+        }
+        break;
+      case Token::SHL:
+        __ sllv(result, left, ToRegister(right_op));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the right_op operand.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sra(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ srl(result, left, shift_count);
+        } else {
+          if (instr->can_deopt()) {
+            __ And(at, left, Operand(0x80000000));
+            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          __ sll(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot() || right->IsArgument()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      ASSERT(right->IsRegister() || right->IsConstantOperand());
+      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    if (right->IsStackSlot() ||
+        right->IsArgument() ||
+        right->IsConstantOperand()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ SubuAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 right_reg,
+                                 overflow);  // Reg at also used as scratch.
+    } else {
+      ASSERT(right->IsRegister());
+      // The overflow check macros do not support constant operands, so
+      // the IsConstantOperand case is handled by the clause above.
+      __ SubuAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 ToRegister(right),
+                                 overflow);  // Reg at also used as scratch.
+    }
+    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  ASSERT(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  __ Move(result, v);
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register array = ToRegister(instr->InputAt(0));
+  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register array = ToRegister(instr->InputAt(0));
+  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
+}
+
+
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+  Register result = ToRegister(instr->result());
+  Register input = ToRegister(instr->InputAt(0));
+
+  // Load map into |result|.
+  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
+  // Load the map's "bit field 2" into |result|. We only need the first byte,
+  // but the following bit field extraction takes care of that anyway.
+  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->TempAt(0));
+  Label done;
+
+  // If the object is a smi return the object.
+  __ Move(result, input);
+  __ JumpIfSmi(input, &done);
+
+  // If the object is not a value type, return the object.
+  __ GetObjectType(input, map, map);
+  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
+  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  __ Nor(result, zero_reg, Operand(input));
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+  __ push(input_reg);
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    __ stop("Unreachable code.");
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot() || right->IsArgument()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      ASSERT(right->IsRegister() || right->IsConstantOperand());
+      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    if (right->IsStackSlot() ||
+        right->IsArgument() ||
+        right->IsConstantOperand()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ AdduAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 right_reg,
+                                 overflow);  // Reg at also used as scratch.
+    } else {
+      ASSERT(right->IsRegister());
+      // The overflow check macros do not support constant operands, so
+      // the IsConstantOperand case is handled by the clause above.
+      __ AdduAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 ToRegister(right),
+                                 overflow);  // Reg at also used as scratch.
+    }
+    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ add_d(result, left, right);
+      break;
+    case Token::SUB:
+      __ sub_d(result, left, right);
+      break;
+    case Token::MUL:
+      __ mul_d(result, left, right);
+      break;
+    case Token::DIV:
+      __ div_d(result, left, right);
+      break;
+    case Token::MOD: {
+      // Save a0-a3 on the stack.
+      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+      __ MultiPush(saved_regs);
+
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ SetCallCDoubleArguments(left, right);
+      __ CallCFunction(
+          ExternalReference::double_fp_operation(Token::MOD, isolate()),
+          0, 2);
+      // Move the result into the double result register.
+      __ GetCFunctionDoubleResult(result);
+
+      // Restore the saved registers.
+      __ MultiPop(saved_regs);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+  ASSERT(ToRegister(instr->InputAt(1)).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  // Other architectures use a nop here to signal that there is no inlined
+  // patchable code. MIPS does not need the nop, since our marker
+  // instruction (andi zero_reg) will never be used in normal code.
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+    LLabel* label = chunk_->GetLabel(i);
+    if (!label->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block,
+                          Condition cc, Register src1, const Operand& src2) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
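+  // If one of the targets is the block emitted immediately after this one,
+  // we can fall through to it and emit a single conditional branch.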
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(right_block),
+              NegateCondition(cc), src1, src2);
+  } else if (right_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+  } else {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+void LCodeGen::EmitBranchF(int left_block, int right_block,
+                           Condition cc, FPURegister src1, FPURegister src2) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
+               NegateCondition(cc), src1, src2);
+  } else if (right_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+  } else {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsInteger32()) {
+    Register reg = ToRegister(instr->InputAt(0));
+    EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+  } else if (r.IsDouble()) {
+    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
+    // Test the double value. Zero and NaN are false.
+    EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
+  } else {
+    ASSERT(r.IsTagged());
+    Register reg = ToRegister(instr->InputAt(0));
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      __ LoadRoot(at, Heap::kTrueValueRootIndex);
+      EmitBranch(true_block, false_block, eq, reg, Operand(at));
+    } else if (type.IsSmi()) {
+      EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+    } else {
+      Label* true_label = chunk_->GetAssemblyLabel(true_block);
+      Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+        __ Branch(false_label, eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ LoadRoot(at, Heap::kTrueValueRootIndex);
+        __ Branch(true_label, eq, reg, Operand(at));
+        __ LoadRoot(at, Heap::kFalseValueRootIndex);
+        __ Branch(false_label, eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ LoadRoot(at, Heap::kNullValueRootIndex);
+        __ Branch(false_label, eq, reg, Operand(at));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ Branch(false_label, eq, reg, Operand(zero_reg));
+        __ JumpIfSmi(reg, true_label);
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ And(at, reg, Operand(kSmiTagMask));
+        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ And(at, at, Operand(1 << Map::kIsUndetectable));
+          __ Branch(false_label, ne, at, Operand(zero_reg));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
+        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+        __ Branch(true_label, ne, at, Operand(zero_reg));
+        __ Branch(false_label);
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        DoubleRegister dbl_scratch = double_scratch0();
+        Label not_heap_number;
+        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+        __ Branch(&not_heap_number, ne, map, Operand(at));
+        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
+        // Falls through if dbl_scratch == 0.
+        __ Branch(false_label);
+        __ bind(&not_heap_number);
+      }
+
+      // We've seen something for the first time -> deopt.
+      DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  block = chunk_->LookupDestination(block);
+  int next_block = GetNextEmittedBlock(current_block_);
+  if (block != next_block) {
+    __ jmp(chunk_->GetAssemblyLabel(block));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = kNoCondition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  Condition cond = TokenToCondition(instr->op(), false);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block =
+      EvalComparison(instr->op(), left_val, right_val) ? true_block
+                                                       : false_block;
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right as doubles and load the
+      // resulting flags into the normal status register.
+      FPURegister left_reg = ToDoubleRegister(left);
+      FPURegister right_reg = ToDoubleRegister(right);
+
+      // If a NaN is involved, i.e. the result is unordered,
+      // jump to the false block label.
+      __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
+                 left_reg, right_reg);
+
+      EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
+    } else {
+      Register cmp_left;
+      Operand cmp_right = Operand(0);
+
+      if (right->IsConstantOperand()) {
+        cmp_left = ToRegister(left);
+        cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
+      } else if (left->IsConstantOperand()) {
+        cmp_left = ToRegister(right);
+        cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
+        // We transposed the operands. Reverse the condition.
+        cond = ReverseCondition(cond);
+      } else {
+        cmp_left = ToRegister(left);
+        cmp_right = Operand(ToRegister(right));
+      }
+
+      EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
+    }
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  Register right = ToRegister(instr->InputAt(1));
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  EmitBranch(true_block, false_block, eq, left, Operand(right));
+}
+
+
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  EmitBranch(true_block, false_block, eq, left,
+             Operand(instr->hydrogen()->right()));
+}
+
+
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->InputAt(0));
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
+  if (instr->hydrogen()->representation().IsSpecialization() ||
+      instr->hydrogen()->type().IsSmi()) {
+    EmitGoto(false_block);
+    return;
+  }
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ LoadRoot(at, nil_value);
+  if (instr->kind() == kStrictEquality) {
+    EmitBranch(true_block, false_block, eq, reg, Operand(at));
+  } else {
+    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
+    Label* true_label = chunk_->GetAssemblyLabel(true_block);
+    Label* false_label = chunk_->GetAssemblyLabel(false_block);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+    __ LoadRoot(at, other_nil_value);  // In the delay slot.
+    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+    __ JumpIfSmi(reg, false_label);  // In the delay slot.
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ And(scratch, scratch, 1 << Map::kIsUndetectable);
+    EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
+  }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+                                 Register temp1,
+                                 Label* is_not_object,
+                                 Label* is_object) {
+  Register temp2 = scratch0();
+  __ JumpIfSmi(input, is_not_object);
+
+  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+  __ Branch(is_object, eq, input, Operand(temp2));
+
+  // Load map.
+  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined.
+  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
+  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
+
+  // Load instance type and check that it is in object type range.
+  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+  __ Branch(is_not_object,
+            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+
+  return le;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp1 = ToRegister(instr->TempAt(0));
+  Register temp2 = scratch0();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond =
+      EmitIsObject(reg, temp1, false_label, true_label);
+
+  EmitBranch(true_block, false_block, true_cond, temp2,
+             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+  __ And(at, input_reg, kSmiTagMask);
+  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
+  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->InputAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  __ JumpIfSmi(input, false_label);
+
+  __ GetObjectType(input, scratch, scratch);
+  EmitBranch(true_block,
+             false_block,
+             BranchCondition(instr->hydrogen()),
+             scratch,
+             Operand(TestType(instr->hydrogen())));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotString(input);
+  }
+
+  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ lw(scratch,
+         FieldMemOperand(input, String::kHashFieldOffset));
+  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
+  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+// Branches to a label or falls through with the address of this instance's
+// class name returned in the temp register, available for comparison by the
+// caller. Trashes the temp registers, but not the input. Only input and
+// temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  ASSERT(!input.is(temp));
+  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
+  __ JumpIfSmi(input, is_false);
+
+  if (class_name->IsEqualTo(CStrVector("Function"))) {
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+    __ GetObjectType(input, temp, temp2);
+    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+  } else {
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ GetObjectType(input, temp, temp2);
+    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  }
+
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+  // Check if the constructor in the map is a function.
+  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ GetObjectType(temp, temp2, temp2);
+  if (class_name->IsEqualTo(CStrVector("Object"))) {
+    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
+  } else {
+    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(temp, FieldMemOperand(temp,
+                               SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is a symbol because it's a literal.
+  // The name in the constructor is a symbol because of the way the context is
+  // booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are symbols it is sufficient to use an identity
+  // comparison.
+
+  // End with the address of this class_name instance in temp register.
+  // On MIPS, the caller must do the comparison with Handle<String> class_name.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->TempAt(0));
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+  int true_block = instr->true_block_id();
+  int false_block = instr->false_block_id();
+
+  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  Label true_label, done;
+  ASSERT(ToRegister(instr->InputAt(0)).is(a0));  // Object is in a0.
+  ASSERT(ToRegister(instr->InputAt(1)).is(a1));  // Function is in a1.
+  Register result = ToRegister(instr->result());
+  ASSERT(result.is(v0));
+
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+  __ Branch(&true_label, eq, result, Operand(zero_reg));
+  __ li(result, Operand(factory()->false_value()));
+  __ Branch(&done);
+  __ bind(&true_label);
+  __ li(result, Operand(factory()->true_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+   public:
+    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+                                  LInstanceOfKnownGlobal* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+    }
+    virtual LInstruction* instr() { return instr_; }
+    Label* map_check() { return &map_check_; }
+
+   private:
+    LInstanceOfKnownGlobal* instr_;
+    Label map_check_;
+  };
+
+  DeferredInstanceOfKnownGlobal* deferred =
+      new DeferredInstanceOfKnownGlobal(this, instr);
+
+  Label done, false_result;
+  Register object = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(object.is(a0));
+  ASSERT(result.is(v0));
+
+  // A Smi is not an instance of anything.
+  __ JumpIfSmi(object, &false_result);
+
+  // This is the inlined call site instanceof cache. The two occurrences of the
+  // hole value will be patched to the last map/result pair generated by the
+  // instanceof stub.
+  Label cache_miss;
+  Register map = temp;
+  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  __ bind(deferred->map_check());  // Label for calculating code patching.
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation to be able to later patch with
+  // the cached map.
+  __ li(at, Operand(factory()->the_hole_value()), true);
+  __ Branch(&cache_miss, ne, map, Operand(at));
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation to be able to later patch
+  // with true or false.
+  __ li(result, Operand(factory()->the_hole_value()), true);
+  __ Branch(&done);
+
+  // The inlined call site cache did not match. Check null and string before
+  // calling the deferred code.
+  __ bind(&cache_miss);
+  // Null is not an instance of anything.
+  __ LoadRoot(temp, Heap::kNullValueRootIndex);
+  __ Branch(&false_result, eq, object, Operand(temp));
+
+  // A string value is not an instance of anything.
+  Condition cc = __ IsObjectStringType(object, temp, temp);
+  __ Branch(&false_result, cc, temp, Operand(zero_reg));
+
+  // Go to the deferred code.
+  __ Branch(deferred->entry());
+
+  __ bind(&false_result);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+  // At this point, result holds either the true or the false value. The
+  // deferred code also produces a true or false object.
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                                Label* map_check) {
+  Register result = ToRegister(instr->result());
+  ASSERT(result.is(v0));
+
+  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kArgsInRegisters);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kCallSiteInlineCheck);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kReturnTrueFalseObject);
+  InstanceofStub stub(flags);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+  // Get the temp register reserved by the instruction. This needs to be t0,
+  // as its slot in the pushed safepoint registers is used to communicate the
+  // offset to the location of the map check.
+  Register temp = ToRegister(instr->TempAt(0));
+  ASSERT(temp.is(t0));
+  __ li(InstanceofStub::right(), Operand(instr->function()));
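+  // The stub locates the patchable map check through this delta: the number
+  // of instructions from the bound map_check label to the eventual return
+  // address. kAdditionalDelta presumably covers the instructions emitted
+  // between the computation below and the call itself.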
+  static const int kAdditionalDelta = 7;
+  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+  Label before_push_delta;
+  __ bind(&before_push_delta);
+  {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ li(temp, Operand(delta * kPointerSize), true);
+    __ StoreToSafepointRegisterSlot(temp, temp);
+  }
+  CallCodeGeneric(stub.GetCode(),
+                  RelocInfo::CODE_TARGET,
+                  instr,
+                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  // Put the result value into the result register slot and
+  // restore all registers.
+  __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // On MIPS there is no need for a "no inlined smi code" marker (nop).
+
+  Condition condition = ComputeCompareCondition(op);
+  // A minor optimization that relies on LoadRoot always emitting one
+  // instruction.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+  Label done;
+  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
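+  // The true-value LoadRoot sits in the branch delay slot, so it executes
+  // unconditionally; when the branch is taken the false-value LoadRoot is
+  // skipped, leaving exactly one of the two roots in the result register.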
+  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in v0.
+    __ push(v0);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
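+  // Tear down the frame: after restoring ra and fp, drop the receiver plus
+  // the parameters, hence the (GetParameterCount() + 1) slots below.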
+  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+  __ mov(sp, fp);
+  __ Pop(ra, fp);
+  __ Addu(sp, sp, Operand(sp_delta));
+  __ Jump(ra);
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+  Register result = ToRegister(instr->result());
+  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+  }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  __ li(a2, Operand(instr->name()));
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
+                                             : RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+  Register value = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+
+  // Load the cell.
+  __ li(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+
+  // If the cell we are storing to contains the hole it could have
+  // been deleted from the property dictionary. In that case, we need
+  // to update the property details in the property dictionary to mark
+  // it as no longer deleted.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ lw(scratch2,
+          FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), scratch2, Operand(at));
+  }
+
+  // Store the value.
+  __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+
+  // Cells are always in the remembered set.
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteField(scratch,
+                        JSGlobalPropertyCell::kValueOffset,
+                        value,
+                        scratch2,
+                        kRAHasBeenSaved,
+                        kSaveFPRegs,
+                        OMIT_REMEMBERED_SET,
+                        check_needed);
+  }
+}
+
+
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(a1));
+  ASSERT(ToRegister(instr->value()).is(a0));
+
+  __ li(a2, Operand(instr->name()));
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ lw(result, ContextOperand(context, instr->slot_index()));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  MemOperand target = ContextOperand(context, instr->slot_index());
+  __ sw(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch0(),
+                              kRAHasBeenSaved,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  Register object = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  if (instr->hydrogen()->is_in_object()) {
+    __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
+  } else {
+    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
+  }
+}
+
+
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+                                               Register object,
+                                               Handle<Map> type,
+                                               Handle<String> name) {
+  LookupResult lookup(isolate());
+  type->LookupInDescriptors(NULL, *name, &lookup);
+  ASSERT(lookup.IsProperty() &&
+         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+  if (lookup.type() == FIELD) {
+    int index = lookup.GetLocalFieldIndexFromMap(*type);
+    int offset = index * kPointerSize;
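+    // Worked example, assuming kPointerSize == 4: index -1 yields offset -4,
+    // i.e. the last word of the object's fixed area (instance_size() - 4),
+    // whereas index 2 reads word 2 of the out-of-line properties array, just
+    // past FixedArray::kHeaderSize.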
+    if (index < 0) {
+      // Negative property indices are in-object properties, indexed
+      // from the end of the fixed part of the object.
+      __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
+    } else {
+      // Non-negative property indices are in the properties array.
+      __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+      __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+    }
+  } else {
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
+  }
+}
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+  Register object = ToRegister(instr->object());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  int map_count = instr->hydrogen()->types()->length();
+  Handle<String> name = instr->hydrogen()->name();
+  if (map_count == 0) {
+    ASSERT(instr->hydrogen()->need_generic());
+    __ li(a2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    Label done;
+    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
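+    // Dispatch on the receiver's map: each expected map gets an inline
+    // compare-and-load, and the last map either falls through to the generic
+    // IC or deoptimizes, depending on need_generic().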
+    for (int i = 0; i < map_count - 1; ++i) {
+      Handle<Map> map = instr->hydrogen()->types()->at(i);
+      Label next;
+      __ Branch(&next, ne, scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ Branch(&done);
+      __ bind(&next);
+    }
+    Handle<Map> map = instr->hydrogen()->types()->last();
+    if (instr->hydrogen()->need_generic()) {
+      Label generic;
+      __ Branch(&generic, ne, scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ Branch(&done);
+      __ bind(&generic);
+      __ li(a2, Operand(name));
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      CallCode(ic, RelocInfo::CODE_TARGET, instr);
+    } else {
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  // Name is always in a2.
+  __ li(a2, Operand(instr->name()));
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Check that the function really is a function. Load map into the
+  // result register.
+  __ GetObjectType(function, result, scratch);
+  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
+
+  // Get the prototype or initial map from the function.
+  __ lw(result,
+         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ GetObjectType(result, scratch, scratch);
+  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+  // Get the prototype from the initial map.
+  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+  __ Branch(&done);
+
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  __ bind(&non_instance);
+  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+  Register result = ToRegister(instr->result());
+  Register input = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+
+  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
+  if (FLAG_debug_code) {
+    Label done, fail;
+    __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
+    __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);  // In the delay slot.
+    __ Branch(&done, eq, scratch, Operand(at));
+    // |scratch| still contains the map of the elements object in |result|.
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+    __ Ext(scratch, scratch, Map::kElementsKindShift,
+           Map::kElementsKindBitCount);
+    __ Branch(&done, eq, scratch, Operand(FAST_ELEMENTS));
+    __ Branch(&fail, lt, scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ Branch(&done, le, scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ bind(&fail);
+    __ Abort("Check for fast or external elements failed.");
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadExternalArrayPointer(
+    LLoadExternalArrayPointer* instr) {
+  Register to_reg = ToRegister(instr->result());
+  Register from_reg = ToRegister(instr->InputAt(0));
+  __ lw(to_reg, FieldMemOperand(from_reg,
+                                ExternalArray::kExternalPointerOffset));
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register length = ToRegister(instr->length());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+
+  // Bail out if index is not a valid argument index. The unsigned comparison
+  // also catches negative indices for free.
+
+  // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
+  // as is done on ARM. It will save us an instruction.
+  DeoptimizeIf(ls, instr->environment(), length, Operand(index));
+
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting index from length accounts for one of them; add one more.
+  __ subu(length, length, index);
+  __ Addu(length, length, Operand(1));
+  __ sll(length, length, kPointerSizeLog2);
+  __ Addu(at, arguments, Operand(length));
+  __ lw(result, MemOperand(at, 0));
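+  // E.g. for length == 3 and index == 0 the load address is
+  // arguments + (3 - 0 + 1) * kPointerSize, consistent with the two-word
+  // frame overhead described above.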
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register key = EmitLoadRegister(instr->key(), scratch0());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Load the result.
+  __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
+  __ addu(scratch, elements, scratch);
+  __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+
+  // Check for the hole value.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+    LLoadKeyedFastDoubleElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int shift_size =
+      ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+
+  if (key_is_constant) {
+    __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
+            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ sll(scratch, key, shift_size);
+    __ Addu(elements, elements, Operand(scratch));
+    __ Addu(elements, elements,
+            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  }
+
+  __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
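+  // The hole is a distinguished NaN bit pattern, so inspecting only the upper
+  // word (at offset sizeof(kHoleNanLower32)) against kHoleNanUpper32 is
+  // enough to detect it here, because stores canonicalize every other NaN.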
+
+  __ ldc1(result, MemOperand(elements));
+}
+
+
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+    LLoadKeyedSpecializedArrayElement* instr) {
+  Register external_pointer = ToRegister(instr->external_pointer());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    FPURegister result = ToDoubleRegister(instr->result());
+    if (key_is_constant) {
+      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch0(), key, shift_size);
+      __ Addu(scratch0(), scratch0(), external_pointer);
+    }
+
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      __ lwc1(result, MemOperand(scratch0()));
+      __ cvt_d_s(result, result);
+    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+      __ ldc1(result, MemOperand(scratch0()));
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    Register scratch = scratch0();
+    MemOperand mem_operand(zero_reg);
+    if (key_is_constant) {
+      mem_operand = MemOperand(external_pointer,
+                               constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch, key, shift_size);
+      __ Addu(scratch, scratch, external_pointer);
+      mem_operand = MemOperand(scratch);
+    }
+    switch (elements_kind) {
+      case EXTERNAL_BYTE_ELEMENTS:
+        __ lb(result, mem_operand);
+        break;
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ lbu(result, mem_operand);
+        break;
+      case EXTERNAL_SHORT_ELEMENTS:
+        __ lh(result, mem_operand);
+        break;
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ lhu(result, mem_operand);
+        break;
+      case EXTERNAL_INT_ELEMENTS:
+        __ lw(result, mem_operand);
+        break;
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ lw(result, mem_operand);
+        // TODO(danno): we could be more clever here, perhaps having a special
+        // version of the stub that detects if the overflow case actually
+        // happens, and generates code that returns a double rather than int.
+        DeoptimizeIf(Ugreater_equal, instr->environment(),
+            result, Operand(0x80000000));
+        break;
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a1));
+  ASSERT(ToRegister(instr->key()).is(a0));
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register temp = scratch1();
+  Register result = ToRegister(instr->result());
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label done, adapted;
+  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Result is the frame pointer for the frame if not adapted and for the real
+  // frame below the adaptor frame if adapted.
+  __ movn(result, fp, temp);  // move only if temp is not equal to zero (ne)
+  __ movz(result, scratch, temp);  // move only if temp is equal to zero (eq)
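+  // The movn/movz pair is a branchless select:
+  //   result = (temp == 0) ? scratch : fp;
+  // i.e. the caller's frame pointer is used only when the context marker
+  // identified an arguments adaptor frame.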
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
+  __ Branch(&done, eq, fp, Operand(elem));
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(result,
+        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  ASSERT(receiver.is(a0));  // Used for parameter count.
+  ASSERT(function.is(a1));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, receiver_ok;
+
+  // Do not transform the receiver to object for strict mode
+  // functions.
+  __ lw(scratch,
+         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(scratch,
+         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+
+  // Do not transform the receiver to object for builtins.
+  int32_t strict_mode_function_mask =
+      1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
+  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+
+  // Deoptimize if the receiver is not a JS object.
+  __ And(scratch, receiver, Operand(kSmiTagMask));
+  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+
+  __ GetObjectType(receiver, scratch, scratch);
+  DeoptimizeIf(lt, instr->environment(),
+               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ Branch(&receiver_ok);
+
+  __ bind(&global_object);
+  __ lw(receiver, GlobalObjectOperand());
+  __ lw(receiver,
+         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+  __ bind(&receiver_ok);
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ Move(receiver, length);
+  // The arguments are at a one-pointer-size offset from elements.
+  __ Addu(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
+  __ sll(scratch, length, 2);
+  __ bind(&loop);
+  __ Addu(scratch, elements, scratch);
+  __ lw(scratch, MemOperand(scratch));
+  __ push(scratch);
+  __ Subu(length, length, Operand(1));
+  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
+  __ sll(scratch, length, 2);
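+  // The sll in each delay slot recomputes the byte offset of the next
+  // argument, so the loop walks the elements array from the last argument
+  // down to the first without a separate index-update instruction.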
+
+  __ bind(&invoke);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator safepoint_generator(this,
+                                         pointers,
+                                         env->deoptimization_index());
+  // The number of arguments is stored in receiver which is a0, as expected
+  // by InvokeFunction.
+  v8::internal::ParameterCount actual(receiver);
+  __ InvokeFunction(function, actual, CALL_FUNCTION,
+                    safepoint_generator, CALL_AS_METHOD);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->InputAt(0);
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort("DoPushArgument not implemented for double type.");
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, at);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  LoadHeapObject(result, instr->hydrogen()->closure());
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, cp);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ lw(result,
+        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register global = ToRegister(instr->global());
+  Register result = ToRegister(instr->result());
+  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int arity,
+                                 LInstruction* instr,
+                                 CallKind call_kind) {
+  // Change context if needed.
+  bool change_context =
+      (info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  }
+
+  // Set a0 to the number of arguments if adaptation is not needed. Assumes
+  // that a0 is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ li(a0, Operand(arity));
+  }
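+  // Because the callee is known statically, we load its code entry and call
+  // it directly below rather than going through a generic call IC.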
+
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  // Invoke function.
+  __ SetCallKind(t1, call_kind);
+  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Call(at);
+
+  // Set up deoptimization.
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+
+  // Restore context.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  __ mov(a0, v0);
+  __ li(a1, Operand(instr->function()));
+  CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ Move(result, input);
+  __ And(at, exponent, Operand(HeapNumber::kSignMask));
+  __ Branch(&done, eq, at, Operand(zero_reg));
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(a1) ? a0 : a1;
+    Register tmp2 = input.is(a2) ? a0 : a2;
+    Register tmp3 = input.is(a3) ? a0 : a3;
+    Register tmp4 = input.is(t0) ? a0 : t0;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ Branch(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    // Set the pointer to the new heap number in tmp1.
+    if (!tmp1.is(v0)) {
+      __ mov(tmp1, v0);
+    }
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
+    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
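+    // Clearing only the sign bit of the exponent word gives |value| for every
+    // IEEE 754 double, including infinities and NaN; the mantissa word is
+    // copied over unchanged.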
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  Label done;
+  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+  __ mov(result, input);
+  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
+  __ subu(result, zero_reg, input);
+  // Overflow if result is still negative, i.e. 0x80000000.
+  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LUnaryMathOperation* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LUnaryMathOperation* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    FPURegister input = ToDoubleRegister(instr->InputAt(0));
+    FPURegister result = ToDoubleRegister(instr->result());
+    __ abs_d(result, input);
+  } else if (r.IsInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->InputAt(0));
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  FPURegister single_scratch = double_scratch0().low();
+  Register scratch1 = scratch0();
+  Register except_flag = ToRegister(instr->TempAt(0));
+
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     single_scratch,
+                     input,
+                     scratch1,
+                     except_flag);
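+  // except_flag is zero only if the FPU conversion completed without raising
+  // an exception; anything else (e.g. an input outside the int32 range)
+  // triggers the deoptimization below.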
+
+  // Deopt if the operation did not succeed.
+  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+  // Load the result.
+  __ mfc1(result, single_scratch);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    Label done;
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ mfc1(scratch1, input.high());
+    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Label done, check_sign_on_zero;
+
+  // Extract exponent bits.
+  __ mfc1(result, input.high());
+  __ Ext(scratch,
+         result,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+  Label skip1;
+  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
+  __ mov(result, zero_reg);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Branch(&check_sign_on_zero);
+  } else {
+    __ Branch(&done);
+  }
+  __ bind(&skip1);
+
+  // The following conversion will not work with numbers
+  // outside of ]-2^32, 2^32[.
+  DeoptimizeIf(ge, instr->environment(), scratch,
+               Operand(HeapNumber::kExponentBias + 32));
+
+  // Save the original sign for later comparison.
+  __ And(scratch, result, Operand(HeapNumber::kSignMask));
+
+  __ Move(double_scratch0(), 0.5);
+  __ add_d(input, input, double_scratch0());
+
+  // Check sign of the result: if the sign changed, the input
+  // value was in ]-0.5, 0[ and the result should be -0.
+  __ mfc1(result, input.high());
+  __ Xor(result, result, Operand(scratch));
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // ARM uses 'mi' here, which is 'lt'
+    DeoptimizeIf(lt, instr->environment(), result,
+                 Operand(zero_reg));
+  } else {
+    Label skip2;
+    // ARM uses 'mi' here, which is 'lt'
+    // Negating it results in 'ge'
+    __ Branch(&skip2, ge, result, Operand(zero_reg));
+    __ mov(result, zero_reg);
+    __ Branch(&done);
+    __ bind(&skip2);
+  }
+
+  Register except_flag = scratch;
+
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     double_scratch0().low(),
+                     input,
+                     result,
+                     except_flag);
+
+  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+  __ mfc1(result, double_scratch0().low());
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ bind(&check_sign_on_zero);
+    __ mfc1(scratch, input.high());
+    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ sqrt_d(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_scratch = double_scratch0();
+
+  // Add +0 to convert -0 to +0.
+  __ mtc1(zero_reg, double_scratch.low());
+  __ mtc1(zero_reg, double_scratch.high());
+  __ add_d(result, input, double_scratch);
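+  // IEEE 754 addition of +0 maps -0 to +0 and leaves other values unchanged,
+  // giving sqrt_d the +0 input it needs so that Math.pow(-0, 0.5) is +0.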
+  __ sqrt_d(result, result);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  Register scratch = scratch0();
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  if (exponent_type.IsDouble()) {
+    // Prepare arguments and call C function.
+    __ PrepareCallCFunction(0, 2, scratch);
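+    // The (0, 2) pair tells PrepareCallCFunction that the call takes no
+    // integer arguments and two double arguments, matching the signature of
+    // the power_double_double helper called below.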
+    __ SetCallCDoubleArguments(ToDoubleRegister(left),
+                               ToDoubleRegister(right));
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
+  } else if (exponent_type.IsInteger32()) {
+    ASSERT(ToRegister(right).is(a0));
+    // Prepare arguments and call C function.
+    __ PrepareCallCFunction(1, 1, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
+    __ CallCFunction(
+        ExternalReference::power_double_int_function(isolate()), 1, 1);
+  } else {
+    ASSERT(exponent_type.IsTagged());
+    ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+
+    Register right_reg = ToRegister(right);
+
+    // Check for smi on the right hand side.
+    Label non_smi, call;
+    __ JumpIfNotSmi(right_reg, &non_smi);
+
+    // Untag smi and convert it to a double.
+    __ SmiUntag(right_reg);
+    FPURegister single_scratch = double_scratch0();
+    __ mtc1(right_reg, single_scratch);
+    __ cvt_d_w(result_reg, single_scratch);
+    __ Branch(&call);
+
+    // Heap number map check.
+    __ bind(&non_smi);
+    __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+    __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));
+
+    // Prepare arguments and call C function.
+    __ bind(&call);
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
+  }
+  // Store the result in the result register.
+  __ GetCFunctionDoubleResult(result_reg);
+}
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathAbs:
+      DoMathAbs(instr);
+      break;
+    case kMathFloor:
+      DoMathFloor(instr);
+      break;
+    case kMathRound:
+      DoMathRound(instr);
+      break;
+    case kMathSqrt:
+      DoMathSqrt(instr);
+      break;
+    case kMathPowHalf:
+      DoMathPowHalf(instr);
+      break;
+    case kMathCos:
+      DoMathCos(instr);
+      break;
+    case kMathSin:
+      DoMathSin(instr);
+      break;
+    case kMathLog:
+      DoMathLog(instr);
+      break;
+    default:
+      Abort("Unimplemented type of LUnaryMathOperation.");
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(a1));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator generator(this, pointers, env->deoptimization_index());
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+  __ li(a2, Operand(instr->name()));
+  CallCode(ic, mode, instr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ Drop(1);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+  __ li(a2, Operand(instr->name()));
+  CallCode(ic, mode, instr);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  __ li(a1, Operand(instr->target()));
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+  __ li(a0, Operand(instr->arity()));
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Register object = ToRegister(instr->object());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  int offset = instr->offset();
+
+  ASSERT(!object.is(value));
+
+  if (!instr->transition().is_null()) {
+    __ li(scratch, Operand(instr->transition()));
+    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  }
+
+  // Do the store.
+  HType type = instr->hydrogen()->value()->type();
+  SmiCheck check_needed =
+      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  if (instr->is_in_object()) {
+    __ sw(value, FieldMemOperand(object, offset));
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          scratch,
+                          kRAHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
+    }
+  } else {
+    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    __ sw(value, FieldMemOperand(scratch, offset));
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWriteField(scratch,
+                          offset,
+                          value,
+                          object,
+                          kRAHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a1));
+  ASSERT(ToRegister(instr->value()).is(a0));
+
+  // Name is always in a2.
+  __ li(a2, Operand(instr->name()));
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
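+  // One unsigned comparison covers both failure modes: an index that is too
+  // large, and a negative index, which becomes a huge unsigned value.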
+  DeoptimizeIf(hs,
+               instr->environment(),
+               ToRegister(instr->index()),
+               Operand(ToRegister(instr->length())));
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->object());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+  Register scratch = scratch0();
+
+  // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  // conversion, so it deopts in that case.
+  if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+    __ And(at, value, Operand(kSmiTagMask));
+    DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+  }
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ sw(value, FieldMemOperand(elements, offset));
+  } else {
+    __ sll(scratch, key, kPointerSizeLog2);
+    __ addu(scratch, elements, scratch);
+    __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kRAHasBeenSaved,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+    LStoreKeyedFastDoubleElement* instr) {
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = no_reg;
+  Register scratch = scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  Label not_nan;
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  if (key_is_constant) {
+    __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
+            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ sll(scratch, key, shift_size);
+    __ Addu(scratch, elements, Operand(scratch));
+    __ Addu(scratch, scratch,
+            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  }
+
+  Label is_nan;
+  // Check for NaN. All NaNs must be canonicalized.
+  __ BranchF(NULL, &is_nan, eq, value, value);
+  __ Branch(&not_nan);
+
+  // Only load the canonical NaN if the comparison above signaled NaN.
+  __ bind(&is_nan);
+  __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
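+  // Canonicalizing every NaN that is stored guarantees the hole's NaN pattern
+  // can never appear as an ordinary array value, keeping the hole check in
+  // the fast double load unambiguous.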
+
+  __ bind(&not_nan);
+  __ sdc1(value, MemOperand(scratch));
+}
+
+
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+    LStoreKeyedSpecializedArrayElement* instr) {
+  Register external_pointer = ToRegister(instr->external_pointer());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    FPURegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch0(), key, shift_size);
+      __ Addu(scratch0(), scratch0(), external_pointer);
+    }
+
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      __ cvt_s_d(double_scratch0(), value);
+      __ swc1(double_scratch0(), MemOperand(scratch0()));
+    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+      __ sdc1(value, MemOperand(scratch0()));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand(zero_reg);
+    Register scratch = scratch0();
+    if (key_is_constant) {
+      mem_operand = MemOperand(external_pointer,
+                               constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch, key, shift_size);
+      __ Addu(scratch, scratch, external_pointer);
+      mem_operand = MemOperand(scratch);
+    }
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ sb(value, mem_operand);
+        break;
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ sh(value, mem_operand);
+        break;
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ sw(value, mem_operand);
+        break;
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a2));
+  ASSERT(ToRegister(instr->key()).is(a1));
+  ASSERT(ToRegister(instr->value()).is(a0));
+
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register new_map_reg = ToRegister(instr->new_map_reg());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = from_map->elements_kind();
+  ElementsKind to_kind = to_map->elements_kind();
+
+  __ mov(ToRegister(instr->result()), object_reg);
+
+  Label not_applicable;
+  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
+
+  __ li(new_map_reg, Operand(to_map));
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+                        scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(a2));
+    ASSERT(new_map_reg.is(a3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+             RelocInfo::CODE_TARGET, instr);
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(a2));
+    ASSERT(new_map_reg.is(a3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+             RelocInfo::CODE_TARGET, instr);
+  } else {
+    UNREACHABLE();
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  __ push(ToRegister(instr->left()));
+  __ push(ToRegister(instr->right()));
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt: public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  Register temp = scratch1();
+  Register string = ToRegister(instr->string());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  DeferredStringCharCodeAt* deferred =
+      new DeferredStringCharCodeAt(this, instr);
+
+  // Fetch the instance type of the receiver into the result register.
+  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ And(temp, result, kIsIndirectStringMask);
+  __ Branch(&check_sequential, eq, temp, Operand(zero_reg));
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ And(temp, result, kSlicedNotConsMask);
+  __ Branch(&cons_string, eq, temp, Operand(zero_reg));
+
+  // Handle slices.
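+  // A sliced string stores a smi offset into its parent; add the untagged
+  // offset to the index and continue the lookup in the parent string.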
+  Label indirect_string_loaded;
+  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ sra(temp, result, kSmiTagSize);
+  __ addu(index, index, temp);
+  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded);
+
+  // Handle conses.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ LoadRoot(temp, Heap::kEmptyStringRootIndex);
+  __ Branch(deferred->entry(), ne, result, Operand(temp));
+  // Get the first of the two strings and load its instance type.
+  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(temp, result, Operand(kStringRepresentationMask));
+  __ Branch(deferred->entry(), ne, temp, Operand(zero_reg));
+
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ And(temp, result, Operand(kStringEncodingMask));
+  __ Branch(&ascii_string, ne, temp, Operand(zero_reg));
+
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  Label done;
+  __ Addu(result,
+          string,
+          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
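+  // Scale the index by 2: each two-byte character occupies two bytes.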
+  __ sll(temp, index, 1);
+  __ Addu(result, result, temp);
+  __ lhu(result, MemOperand(result, 0));
+  __ Branch(&done);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+  __ Addu(result,
+          string,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(result, result, index);
+  __ lbu(result, MemOperand(result, 0));
+
+  __ bind(&done);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+  if (FLAG_debug_code) {
+    __ AbortIfNotSmi(v0);
+  }
+  __ SmiUntag(v0);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode: public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new DeferredStringCharFromCode(this, instr);
+
+  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  ASSERT(!char_code.is(result));
+
+  __ Branch(deferred->entry(), hi,
+            char_code, Operand(String::kMaxAsciiCharCode));
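+  // Character codes outside the ASCII range bypass the cache lookup below
+  // and are materialized by the deferred runtime call.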
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ sll(scratch, char_code, kPointerSizeLog2);
+  __ Addu(result, result, scratch);
+  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(deferred->entry(), eq, result, Operand(scratch));
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringLength(LStringLength* instr) {
+  Register string = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  ASSERT(output->IsDoubleRegister());
+  FPURegister single_scratch = double_scratch0().low();
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ lw(scratch, ToMemOperand(input));
+    __ mtc1(scratch, single_scratch);
+  } else {
+    __ mtc1(ToRegister(input), single_scratch);
+  }
+  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI: public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+  Register overflow = scratch0();
+
+  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+  __ SmiTagCheckOverflow(reg, overflow);
+  __ BranchOnOverflow(deferred->entry(), overflow);
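+  // Tagging overflows exactly when bits 30 and 31 of the original value
+  // disagree; in that case the deferred code allocates a heap number.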
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+  Label slow;
+  Register reg = ToRegister(instr->InputAt(0));
+  FPURegister dbl_scratch = double_scratch0();
+
+  // Preserve the value of all registers.
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+  // There was overflow, so bits 30 and 31 of the original integer
+  // disagree. Try to allocate a heap number in new space and store
+  // the value in there. If that fails, call the runtime system.
+  Label done;
+  __ SmiUntag(reg);
+  __ Xor(reg, reg, Operand(0x80000000));
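+  // Untagging an overflowed tag yields the original value with its sign bit
+  // inverted, so the Xor restores it. For example, 0x40000000 tags to
+  // 0x80000000, untags to 0xC0000000, and the Xor gives back 0x40000000.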
+  __ mtc1(reg, dbl_scratch);
+  __ cvt_d_w(dbl_scratch, dbl_scratch);
+  if (FLAG_inline_new) {
+    __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
+    if (!reg.is(t1)) __ mov(reg, t1);
+    __ Branch(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  // TODO(3095996): Put a valid pointer value in the stack slot where the result
+  // register is stored, as this register is in the pointer map, but contains an
+  // integer value.
+  __ StoreToSafepointRegisterSlot(zero_reg, reg);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  if (!reg.is(v0)) __ mov(reg, v0);
+
+  // Done. Store the double in dbl_scratch into the allocated heap number.
+  __ bind(&done);
+  __ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+  __ StoreToSafepointRegisterSlot(reg, reg);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD: public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
+
+  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+  } else {
+    __ Branch(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ mov(reg, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  __ StoreToSafepointRegisterSlot(v0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+  __ SmiTag(ToRegister(input));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register scratch = scratch0();
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  if (instr->needs_check()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, the value of scratch will be non-zero.
+    __ And(scratch, ToRegister(input), Operand(kHeapObjectTag));
+    __ SmiUntag(ToRegister(input));
+    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+  } else {
+    __ SmiUntag(ToRegister(input));
+  }
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                DoubleRegister result_reg,
+                                bool deoptimize_on_undefined,
+                                LEnvironment* env) {
+  Register scratch = scratch0();
+
+  Label load_smi, heap_number, done;
+
+  // Smi check.
+  __ JumpIfSmi(input_reg, &load_smi);
+
+  // Heap number map check.
+  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  if (deoptimize_on_undefined) {
+    DeoptimizeIf(ne, env, scratch, Operand(at));
+  } else {
+    Label heap_number;
+    __ Branch(&heap_number, eq, scratch, Operand(at));
+
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    DeoptimizeIf(ne, env, input_reg, Operand(at));
+
+    // Convert undefined to NaN.
+    __ LoadRoot(at, Heap::kNanValueRootIndex);
+    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+    __ Branch(&done);
+
+    __ bind(&heap_number);
+  }
+  // Heap number to double register conversion.
+  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  __ Branch(&done);
+
+  // Smi to double register conversion.
+  __ bind(&load_smi);
+  __ SmiUntag(input_reg);  // Untag the smi before converting to double.
+  __ mtc1(input_reg, result_reg);
+  __ cvt_d_w(result_reg, result_reg);
+  __ SmiTag(input_reg);  // Retag smi.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->InputAt(0));
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+  DoubleRegister double_scratch = double_scratch0();
+  FPURegister single_scratch = double_scratch.low();
+
+  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // The input is a tagged HeapObject.
+  // Heap number map check.
+  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  // Both the heap number map in 'at' and the object's map in scratch1 are
+  // used by the checks in both arms of the if below.
+
+  if (instr->truncating()) {
+    Register scratch3 = ToRegister(instr->TempAt(1));
+    DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+    ASSERT(!scratch3.is(input_reg) &&
+           !scratch3.is(scratch1) &&
+           !scratch3.is(scratch2));
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    Label heap_number;
+    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
+    // Check for undefined. Undefined is converted to zero for truncating
+    // conversions.
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+    ASSERT(ToRegister(instr->result()).is(input_reg));
+    __ mov(input_reg, zero_reg);
+    __ Branch(&done);
+
+    __ bind(&heap_number);
+    __ ldc1(double_scratch2,
+            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    __ EmitECMATruncate(input_reg,
+                        double_scratch2,
+                        single_scratch,
+                        scratch1,
+                        scratch2,
+                        scratch3);
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+
+    // Load the double value.
+    __ ldc1(double_scratch,
+            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed.
+    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+    // Load the result.
+    __ mfc1(input_reg, single_scratch);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ Branch(&done, ne, input_reg, Operand(zero_reg));
+
+      __ mfc1(scratch1, double_scratch.high());
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  ASSERT(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+  // Let the deferred code handle the HeapObject case.
+  __ JumpIfNotSmi(input_reg, deferred->entry());
+
+  // Smi to int32 conversion.
+  __ SmiUntag(input_reg);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  EmitNumberUntagD(input_reg, result_reg,
+                   instr->hydrogen()->deoptimize_on_undefined(),
+                   instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+  DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister double_scratch = double_scratch0();
+  FPURegister single_scratch = double_scratch0().low();
+
+  if (instr->truncating()) {
+    Register scratch3 = ToRegister(instr->TempAt(1));
+    __ EmitECMATruncate(result_reg,
+                        double_input,
+                        single_scratch,
+                        scratch1,
+                        scratch2,
+                        scratch3);
+  } else {
+    Register except_flag = scratch2;
+
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       single_scratch,
+                       double_input,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed (except_flag != 0).
+    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+    // Load the result.
+    __ mfc1(result_reg, single_scratch);
+  }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->InputAt(0);
+  __ And(at, ToRegister(input), Operand(kSmiTagMask));
+  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  LOperand* input = instr->InputAt(0);
+  __ And(at, ToRegister(input), Operand(kSmiTagMask));
+  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+
+  __ GetObjectType(input, scratch, scratch);
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+    } else {
+      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+      // The upper-bound check can be omitted when the interval ends at
+      // LAST_TYPE.
+      if (last != LAST_TYPE) {
+        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
+      ASSERT(tag == 0 || IsPowerOf2(tag));
+      __ And(at, scratch, mask);
+      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
+          at, Operand(zero_reg));
+    } else {
+      __ And(scratch, scratch, Operand(mask));
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+  ASSERT(instr->InputAt(0)->IsRegister());
+  Register reg = ToRegister(instr->InputAt(0));
+  DeoptimizeIf(ne, instr->environment(), reg,
+               Operand(instr->hydrogen()->target()));
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+  Register scratch = scratch0();
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+  DeoptimizeIf(ne,
+               instr->environment(),
+               scratch,
+               Operand(instr->hydrogen()->map()));
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  Label is_smi, done, heap_number;
+
+  // Smi, heap number, and undefined inputs are all handled here.
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number.
+  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  DeoptimizeIf(ne, instr->environment(), input_reg,
+               Operand(factory()->undefined_value()));
+  __ mov(result_reg, zero_reg);
+  __ jmp(&done);
+
+  // Heap number case.
+  __ bind(&heap_number);
+  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
+                                             HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+  __ jmp(&done);
+
+  // Smi case.
+  __ bind(&is_smi);
+  __ SmiUntag(scratch, input_reg);
+  __ ClampUint8(result_reg, scratch);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::LoadHeapObject(Register result,
+                              Handle<HeapObject> object) {
+  if (heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        factory()->NewJSGlobalPropertyCell(object);
+    __ li(result, Operand(cell));
+    __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    __ li(result, Operand(object));
+  }
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  Register temp1 = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
+
+  Handle<JSObject> holder = instr->holder();
+  Handle<JSObject> current_prototype = instr->prototype();
+
+  // Load prototype object.
+  LoadHeapObject(temp1, current_prototype);
+
+  // Check prototype maps up to the holder.
+  while (!current_prototype.is_identical_to(holder)) {
+    __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+    DeoptimizeIf(ne,
+                 instr->environment(),
+                 temp2,
+                 Operand(Handle<Map>(current_prototype->map())));
+    current_prototype =
+        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+    // Load next prototype object.
+    LoadHeapObject(temp1, current_prototype);
+  }
+
+  // Check the holder map.
+  __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+  DeoptimizeIf(ne,
+               instr->environment(),
+               temp2,
+               Operand(Handle<Map>(current_prototype->map())));
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
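+  // constant_elements is a pair: entry 0 holds the elements kind as a smi
+  // (read above), entry 1 holds the constant element values themselves.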
+
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ li(a1, Operand(constant_elements));
+  __ Push(a3, a2, a1);
+
+  // Pick the right runtime function or stub to call.
+  int length = instr->hydrogen()->length();
+  if (instr->hydrogen()->IsCopyOnWrite()) {
+    ASSERT(instr->hydrogen()->depth() == 1);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+  } else {
+    FastCloneShallowArrayStub::Mode mode =
+        constant_elements_kind == FAST_DOUBLE_ELEMENTS
+        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ li(a2, Operand(instr->hydrogen()->constant_properties()));
+  __ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+  __ Push(t0, a3, a2, a1);
+
+  // Pick the right runtime function to call.
+  if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+  } else {
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  }
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+  __ push(a0);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  Label materialized;
+  // Registers will be used as follows:
+  // a3 = JS function.
+  // t3 = literals array.
+  // a1 = regexp literal.
+  // a0 = regexp literal clone.
+  // a2 and t0-t2 are used as temporaries.
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  int literal_offset = FixedArray::kHeaderSize +
+      instr->hydrogen()->literal_index() * kPointerSize;
+  __ lw(a1, FieldMemOperand(t3, literal_offset));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&materialized, ne, a1, Operand(at));
+
+  // Create the regexp literal using the runtime function.
+  // Result will be in v0.
+  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ li(t1, Operand(instr->hydrogen()->pattern()));
+  __ li(t0, Operand(instr->hydrogen()->flags()));
+  __ Push(t3, t2, t1, t0);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+  __ mov(a1, v0);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+
+  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ Push(a1, a0);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+  __ pop(a1);
+
+  __ bind(&allocated);
+  // Copy the content into the newly allocated memory.
+  // (Unroll copy loop once for better throughput).
+  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+    __ lw(a3, FieldMemOperand(a1, i));
+    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
+    __ sw(a3, FieldMemOperand(v0, i));
+    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
+  }
+  if ((size % (2 * kPointerSize)) != 0) {
+    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
+    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
+  }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  // Use the fast-case closure allocation code, which allocates in new
+  // space, for nested functions that do not need literal cloning.
+  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+  bool pretenure = instr->hydrogen()->pretenure();
+  if (!pretenure && shared_info->num_literals() == 0) {
+    FastNewClosureStub stub(shared_info->strict_mode_flag());
+    __ li(a1, Operand(shared_info));
+    __ push(a1);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ li(a2, Operand(shared_info));
+    __ li(a1, Operand(pretenure
+                       ? factory()->true_value()
+                       : factory()->false_value()));
+    __ Push(cp, a2, a1);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
+  }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  Register input = ToRegister(instr->InputAt(0));
+  __ push(input);
+  CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Register cmp1 = no_reg;
+  Operand cmp2 = Operand(no_reg);
+
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal(),
+                                                  cmp1,
+                                                  cmp2);
+
+  ASSERT(cmp1.is_valid());
+  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
+
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name,
+                                 Register& cmp1,
+                                 Operand& cmp2) {
+  // This function makes heavy use of branch delay slots, filling them with
+  // loads of values that are safe to use regardless of the type of the
+  // input register.
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  if (type_name->Equals(heap()->number_symbol())) {
+    __ JumpIfSmi(input, true_label);
+    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    cmp1 = input;
+    cmp2 = Operand(at);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->string_symbol())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    __ Branch(USE_DELAY_SLOT, false_label,
+              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
+    // input is an object, so it is safe to load its bit field even if we
+    // take the other branch.
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->boolean_symbol())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    cmp1 = at;
+    cmp2 = Operand(input);
+    final_branch_condition = eq;
+
+  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    cmp1 = at;
+    cmp2 = Operand(input);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->undefined_symbol())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    // The first instruction of JumpIfSmi is an And; it is safe in the delay
+    // slot.
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);
+    final_branch_condition = ne;
+
+  } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, scratch, input);
+    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
+    cmp1 = input;
+    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->object_symbol())) {
+    __ JumpIfSmi(input, false_label);
+    if (!FLAG_harmony_typeof) {
+      __ LoadRoot(at, Heap::kNullValueRootIndex);
+      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    }
+    // input is an object, so it is safe to use GetObjectType in the delay
+    // slot.
+    __ GetObjectType(input, input, scratch);
+    __ Branch(USE_DELAY_SLOT, false_label,
+              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // Still an object, so the InstanceType can be loaded.
+    __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
+    __ Branch(USE_DELAY_SLOT, false_label,
+              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // Still an object, so the BitField can be loaded.
+    // Check for undetectable objects => false.
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+  } else {
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
+    __ Branch(false_label);
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+  Register temp1 = ToRegister(instr->TempAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  EmitIsConstructCall(temp1, scratch0());
+
+  EmitBranch(true_block, false_block, eq, temp1,
+             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+  ASSERT(!temp1.is(temp2));
+  // Get the frame pointer for the calling frame.
+  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+  __ Branch(&check_frame_marker, ne, temp2,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  // No code for lazy bailout instruction. Used to capture environment after a
+  // call for populating the safepoint data with deoptimization data.
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+  Register object = ToRegister(instr->object());
+  Register key = ToRegister(instr->key());
+  Register strict = scratch0();
+  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
+  __ Push(object, key, strict);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator safepoint_generator(this,
+                                         pointers,
+                                         env->deoptimization_index());
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+  Register obj = ToRegister(instr->object());
+  Register key = ToRegister(instr->key());
+  __ Push(key, obj);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator safepoint_generator(this,
+                                         pointers,
+                                         env->deoptimization_index());
+  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+    RegisterLazyDeoptimization(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  }
+
+  // The gap code includes the restoring of the safepoint registers.
+  int pc = masm()->pc_offset();
+  safepoints_.SetPcAfterGap(pc);
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStackCheck* instr_;
+  };
+
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(&done, hs, sp, Operand(at));
+    StackCheckStub stub;
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    ASSERT(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new DeferredStackCheck(this, instr);
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
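+    // Pointing the deferred exit at done_label makes the deferred code
+    // resume here once the stack guard has run.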
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+                                   instr->SpilledDoubleRegisterArray());
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  ASSERT(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(osr_pc_offset_ == -1);
+  osr_pc_offset_ = masm()->pc_offset();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 2aec684..f9c013c 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -29,35 +29,398 @@
 #define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
 
 #include "mips/lithium-mips.h"
-
+#include "mips/lithium-gap-resolver-mips.h"
 #include "deoptimizer.h"
 #include "safepoint-table.h"
 #include "scopes.h"
 
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 class LDeferredCode;
+class SafepointGenerator;
 
 class LCodeGen BASE_EMBEDDED {
  public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : chunk_(chunk),
+        masm_(assembler),
+        info_(info),
+        current_block_(-1),
+        current_instruction_(-1),
+        instructions_(chunk->instructions()),
+        deoptimizations_(4),
+        deopt_jump_table_(4),
+        deoptimization_literals_(8),
+        inlined_function_count_(0),
+        scope_(info->scope()),
+        status_(UNUSED),
+        deferred_(8),
+        osr_pc_offset_(-1),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+
+  // Simple accessors.
+  MacroAssembler* masm() const { return masm_; }
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() const { return info_->isolate(); }
+  Factory* factory() const { return isolate()->factory(); }
+  Heap* heap() const { return isolate()->heap(); }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  // LOperand is loaded into dbl_scratch, unless already a double register.
+  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+                                        FloatRegister flt_scratch,
+                                        DoubleRegister dbl_scratch);
+  int ToInteger32(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
 
   // Try to generate code for the entire chunk, but it may fail if the
   // chunk contains constructs we cannot handle. Returns true if the
   // code generation attempt succeeded.
-  bool GenerateCode() {
-    UNIMPLEMENTED();
-    return false;
-  }
+  bool GenerateCode();
 
   // Finish the code by setting stack height, safepoint, and bailout
   // information on it.
-  void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  template<int T>
+  void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+                              Token::Value op);
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredNumberTagI(LNumberTagI* instr);
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                        Label* map_check);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  StrictModeFlag strict_mode_flag() const {
+    return info()->strict_mode_flag();
+  }
+
+  LChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk_->graph(); }
+
+  Register scratch0() { return lithiumScratchReg; }
+  Register scratch1() { return lithiumScratchReg2; }
+  DoubleRegister double_scratch0() { return lithiumScratchDouble; }
+
+  int GetNextEmittedBlock(int block);
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
+
+  void Abort(const char* format, ...);
+  void Comment(const char* format, ...);
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  bool GeneratePrologue();
+  bool GenerateBody();
+  bool GenerateDeferredCode();
+  bool GenerateDeoptJumpTable();
+  bool GenerateSafepointTable();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in a1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int arity,
+                         LInstruction* instr,
+                         CallKind call_kind);
+
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
+
+  void RegisterLazyDeoptimization(LInstruction* instr,
+                                  SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void DeoptimizeIf(Condition cc,
+                    LEnvironment* environment,
+                    Register src1,
+                    const Operand& src2);
+
+  void AddToTranslation(Translation* translation,
+                        LOperand* op,
+                        bool is_tagged);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  // Specific math operations - used from DoUnaryMathOperation.
+  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
+  void DoMathAbs(LUnaryMathOperation* instr);
+  void DoMathFloor(LUnaryMathOperation* instr);
+  void DoMathRound(LUnaryMathOperation* instr);
+  void DoMathSqrt(LUnaryMathOperation* instr);
+  void DoMathPowHalf(LUnaryMathOperation* instr);
+  void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathCos(LUnaryMathOperation* instr);
+  void DoMathSin(LUnaryMathOperation* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       int deoptimization_index);
+  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+  void RecordSafepoint(int deoptimization_index);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    int deoptimization_index);
+  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+                                              int arguments,
+                                              int deoptimization_index);
+  void RecordPosition(int position);
+  int LastSafepointEnd() {
+    return static_cast<int>(safepoints_.GetPcAfterGap());
+  }
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+  void EmitBranch(int left_block,
+                  int right_block,
+                  Condition cc,
+                  Register src1,
+                  const Operand& src2);
+  void EmitBranchF(int left_block,
+                   int right_block,
+                   Condition cc,
+                   FPURegister src1,
+                   FPURegister src2);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(Register input,
+                        DoubleRegister result,
+                        bool deoptimize_on_undefined,
+                        LEnvironment* env);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  // Returns two registers in cmp1 and cmp2 that can be used in the
+  // Branch instruction after EmitTypeofIs.
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name,
+                         Register& cmp1,
+                         Operand& cmp2);
+
+  // Emits optimized code for %_IsObject(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsObject(Register input,
+                         Register temp1,
+                         Label* is_not_object,
+                         Label* is_object);
+
+  // Emits optimized code for %_IsConstructCall().
+  // Caller should branch on equal condition.
+  void EmitIsConstructCall(Register temp1, Register temp2);
+
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name);
+
+  struct JumpTableEntry {
+    explicit inline JumpTableEntry(Address entry)
+        : label(),
+          address(entry) { }
+    Label label;
+    Address address;
+  };
+
+  LChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry> deopt_jump_table_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  Status status_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Converts a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    PushSafepointRegistersScope(LCodeGen* codegen,
+                                Safepoint::Kind kind)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = kind;
+
+      switch (codegen_->expected_safepoint_kind_) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PushSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PushSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+    }
+
+    ~PushSafepointRegistersScope() {
+      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+      ASSERT((kind & Safepoint::kWithRegisters) != 0);
+      switch (kind) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PopSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PopSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
new file mode 100644
index 0000000..8f7f89c
--- /dev/null
+++ b/src/mips/lithium-gap-resolver-mips.cc
@@ -0,0 +1,309 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-gap-resolver-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = lithiumScratchReg;
+static const DoubleRegister kSavedDoubleValueRegister = lithiumScratchDouble;
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32),
+      root_index_(0),
+      in_cycle_(false),
+      saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  ASSERT(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      ASSERT(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
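+
+// Example (illustrative register names): given the parallel moves
+// {a1 -> [sp+4], constant -> a1}, deferring the constant move leaves a1
+// holding its original value while the non-constant moves that read it
+// are resolved, so the constant cannot clobber a pending source.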
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move);
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // When doing a depth-first traversal of moves, a cycle can only be found
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill this spilled value to
+  // the stack, to free up the register.
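+  //
+  // Worked example (illustrative): for the swap {a0 -> a1, a1 -> a0},
+  // PerformMove(0) marks move 0 as pending and recurses into move 1,
+  // whose destination a0 is blocked by the pending root.  BreakCycle then
+  // saves a1 in kSavedValueRegister and eliminates move 1; move 0 copies
+  // a0 into a1, and RestoreValue finally writes the saved value into a0.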
+  ASSERT(!moves_[index].IsPending());
+  ASSERT(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move, it must be moves_[root_index_],
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    ASSERT(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index_].  After performing all moves in the tree rooted
+  // in that move, we restore the saved value to that source location.
+  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  ASSERT(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ mov_d(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ ldc1(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  ASSERT(in_cycle_);
+  ASSERT(saved_destination_ != NULL);
+
+  // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+  if (saved_destination_->IsRegister()) {
+    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
+             kSavedDoubleValueRegister);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ sdc1(kSavedDoubleValueRegister,
+            cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(cgen_->ToRegister(destination), source_register);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      __ sw(source_register, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ lw(cgen_->ToRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        if (!destination_operand.OffsetIsInt16Encodable()) {
+          // 'at' is overwritten while saving the value to the destination.
+          // Therefore we can't use 'at'.  It is OK if the read from the source
+          // destroys 'at', since that happens before the value is written.
+          // This uses only a single reg of the double reg-pair.
+          __ lwc1(kSavedDoubleValueRegister, source_operand);
+          __ swc1(kSavedDoubleValueRegister, destination_operand);
+        } else {
+          __ lw(at, source_operand);
+          __ sw(at, destination_operand);
+        }
+      } else {
+        __ lw(kSavedValueRegister, source_operand);
+        __ sw(kSavedValueRegister, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    Operand source_operand = cgen_->ToOperand(source);
+    if (destination->IsRegister()) {
+      __ li(cgen_->ToRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ li(kSavedValueRegister, source_operand);
+      __ sw(kSavedValueRegister, destination_operand);
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ sdc1(source_register, destination_operand);
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        // kSavedDoubleValueRegister was used to break the cycle,
+        // but kSavedValueRegister is free.
+        MemOperand source_high_operand =
+            cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ lw(kSavedValueRegister, source_operand);
+        __ sw(kSavedValueRegister, destination_operand);
+        __ lw(kSavedValueRegister, source_high_operand);
+        __ sw(kSavedValueRegister, destination_high_operand);
+      } else {
+        __ ldc1(kSavedDoubleValueRegister, source_operand);
+        __ sdc1(kSavedDoubleValueRegister, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips/lithium-gap-resolver-mips.h b/src/mips/lithium-gap-resolver-mips.h
new file mode 100644
index 0000000..2506e38
--- /dev/null
+++ b/src/mips/lithium-gap-resolver-mips.h
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
new file mode 100644
index 0000000..a9a302c
--- /dev/null
+++ b/src/mips/lithium-mips.cc
@@ -0,0 +1,2203 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-allocator-inl.h"
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+    register_spills_[i] = NULL;
+  }
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+    double_register_spills_[i] = NULL;
+  }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+                                    LOperand* spill_operand) {
+  ASSERT(spill_operand->IsStackSlot());
+  ASSERT(register_spills_[allocation_index] == NULL);
+  register_spills_[allocation_index] = spill_operand;
+}
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Inputs operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
+  ASSERT(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+                                          LOperand* spill_operand) {
+  ASSERT(spill_operand->IsDoubleStackSlot());
+  ASSERT(double_register_spills_[allocation_index] == NULL);
+  double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < inputs_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    inputs_[i]->PrintTo(stream);
+  }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+  for (int i = 0; i < results_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    results_[i]->PrintTo(stream);
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::SHL: return "sll-t";
+    case Token::SAR: return "sra-t";
+    case Token::SHR: return "srl-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  InputAt(0)->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  InputAt(1)->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_object(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              *hydrogen()->type_literal()->ToCString(),
+              true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
+  stream->Add("/%s ", hydrogen()->OpName());
+  InputAt(0)->PrintTo(stream);
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  InputAt(1)->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
+  stream->Add("[a2] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) {
+  SmartArrayPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) {
+  SmartArrayPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
+    : spill_slot_count_(0),
+      info_(info),
+      graph_(graph),
+      instructions_(32),
+      pointer_maps_(8),
+      inlined_closures_(1) {
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  // Reserve a second, consecutive slot for a double-width slot.
+  if (is_double) spill_slot_count_++;
+  return spill_slot_count_++;
+}
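+
+// Example: starting from an empty chunk, GetNextSpillIndex(false) returns
+// slot 0; a following GetNextSpillIndex(true) then skips slot 1 and
+// returns slot 2, so the double value occupies the consecutive pair
+// {1, 2}.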
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double)  {
+  int index = GetNextSpillIndex(is_double);
+  if (is_double) {
+    return LDoubleStackSlot::Create(index);
+  } else {
+    return LStackSlot::Create(index);
+  }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  HPhase phase("Mark empty blocks", this);
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        // Note: a distinct index avoids shadowing the outer loop variable.
+        for (int j = first + 1; j < last && can_eliminate; ++j) {
+          LInstruction* cur = instructions()->at(j);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+  LInstructionGap* gap = new LInstructionGap(block);
+  int index = -1;
+  if (instr->IsControl()) {
+    instructions_.Add(gap);
+    index = instructions_.length();
+    instructions_.Add(instr);
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr);
+    instructions_.Add(gap);
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters, and
+  // make sure they end up negative so they are distinguishable from
+  // spill slots.
+  int result = index - info()->scope()->num_parameters() - 1;
+  ASSERT(result < 0);
+  return result;
+}
+
+
+// A parameter relative to fp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+  ASSERT(-1 <= index);  // -1 is the receiver.
+  return (1 + info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
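+
+// Worked example (illustrative): with two declared parameters,
+// GetParameterStackSlot maps the receiver (index 0) to slot -3 and the
+// parameters (indexes 1 and 2) to slots -2 and -1, while ParameterAt(-1)
+// for the receiver yields (1 + 2 - (-1)) * kPointerSize, i.e. 16 bytes
+// with a 4-byte kPointerSize.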
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunkBuilder::Build() {
+  ASSERT(is_unused());
+  chunk_ = new LChunk(info(), graph());
+  HPhase phase("Building chunk", chunk_);
+  status_ = BUILDING;
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
+    PrintF("Aborting LChunk building in @\"%s\": ", *name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  allocator_->RecordUse(value, operand);
+  return operand;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+                                    LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateInstruction<1, I, T>* instr, int index) {
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateInstruction<1, I, T>* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instruction_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instruction_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasObservableSideEffects()) {
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
+  }
+
+  // If the instruction does not have observable side effects, lazy
+  // deoptimization after the call will try to deoptimize to the point
+  // before the call.  Thus we still need to attach an environment to the
+  // call even if the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
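+
+// Summarizing the environment logic above: eagerly-deoptimizing calls
+// always receive an environment here; calls without observable side
+// effects receive one so that lazy deoptimization can target the point
+// before the call; calls with observable side effects instead use the
+// deoptimization environment recorded from the following HSimulate.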
+
+
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+  instr->MarkAsSaveDoubles();
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new LPointerMap(position_));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsTagged()) {
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), a1);
+    LOperand* right = UseFixed(instr->right(), a0);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  }
+
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->left());
+
+  HValue* right_value = instr->right();
+  LOperand* right = NULL;
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    HConstant* constant = HConstant::cast(right_value);
+    right = chunk_->DefineConstantOperand(constant);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
+    right = UseRegisterAtStart(right_value);
+  }
+
+  // Shift operations can only deoptimize if we do a logical shift
+  // by 0 and the result cannot be truncated to int32.
+  bool may_deopt = (op == Token::SHR && constant_value == 0);
+  bool does_deopt = false;
+  if (may_deopt) {
+    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+        does_deopt = true;
+        break;
+      }
+    }
+  }
+
+  LInstruction* result =
+      DefineAsRegister(new LShiftI(op, left, right, does_deopt));
+  return does_deopt ? AssignEnvironment(result) : result;
+}
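+
+// Example of the deoptimization case above (illustrative): for "x >>> 0"
+// with x = -1, the logical shift produces 4294967295, which does not fit
+// in an int32.  Unless every use of the result truncates back to int32,
+// the shift must be able to deoptimize.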
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+  ASSERT(op != Token::MOD);
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LArithmeticD* result = new LArithmeticD(op, left, right);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(op == Token::ADD ||
+         op == Token::DIV ||
+         op == Token::MOD ||
+         op == Token::MUL ||
+         op == Token::SUB);
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  ASSERT(left->representation().IsTagged());
+  ASSERT(right->representation().IsTagged());
+  LOperand* left_operand = UseFixed(left, a1);
+  LOperand* right_operand = UseFixed(right, a0);
+  LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  ASSERT(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      last_environment->SetValueAt(phi->merged_index(), phi);
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                   graph_->GetConstantUndefined());
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  if (current->has_position()) position_ = current->position();
+  LInstruction* instr = current->CompileToLithium(this);
+
+  if (instr != NULL) {
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    instr->set_hydrogen_value(current);
+    chunk_->AddInstruction(instr, current_block_);
+  }
+  current_instruction_ = old_current;
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
+  if (hydrogen_env == NULL) return NULL;
+
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+  int ast_id = hydrogen_env->ast_id();
+  ASSERT(ast_id != AstNode::kNoNumber);
+  int value_count = hydrogen_env->length();
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  for (int i = 0; i < value_count; ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
+    HValue* value = hydrogen_env->values()->at(i);
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      op = NULL;
+    } else if (value->IsPushArgument()) {
+      op = new LArgument((*argument_index_accumulator)++);
+    } else {
+      op = UseAny(value);
+    }
+    result->AddValue(op, value->representation());
+  }
+
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new LGoto(instr->FirstSuccessor()->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* v = instr->value();
+  if (v->EmitAtUses()) {
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+        ? instr->FirstSuccessor()
+        : instr->SecondSuccessor();
+    return new LGoto(successor->block_id());
+  }
+  return AssignEnvironment(new LBranch(UseRegister(v)));
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  return DefineAsRegister(new LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LInstanceOf* result =
+      new LInstanceOf(UseFixed(instr->left(), a0),
+                      UseFixed(instr->right(), a1));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+    HInstanceOfKnownGlobal* instr) {
+  LInstanceOfKnownGlobal* result =
+      new LInstanceOfKnownGlobal(UseFixed(instr->left(), a0), FixedTemp(t0));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  LOperand* receiver = UseFixed(instr->receiver(), a0);
+  LOperand* length = UseFixed(instr->length(), a2);
+  LOperand* elements = UseFixed(instr->elements(), a3);
+  LApplyArguments* result = new LApplyArguments(function,
+                                                receiver,
+                                                length,
+                                                elements);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+  ++argument_count_;
+  LOperand* argument = Use(instr->argument());
+  return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalObject(context));
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+  LOperand* global_object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalReceiver(global_object));
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+    HCallConstantFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  BuiltinFunctionId op = instr->op();
+  if (op == kMathLog || op == kMathSin || op == kMathCos) {
+    LOperand* input = UseFixedDouble(instr->value(), f4);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
+    return MarkAsCall(DefineFixedDouble(result, f4), instr);
+  } else {
+    LOperand* input = UseRegisterAtStart(instr->value());
+    LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+    LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+    switch (op) {
+      case kMathAbs:
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      case kMathFloor:
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      case kMathSqrt:
+        return DefineAsRegister(result);
+      case kMathRound:
+        return AssignEnvironment(DefineAsRegister(result));
+      case kMathPowHalf:
+        return DefineAsRegister(result);
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+  ASSERT(instr->key()->representation().IsTagged());
+  argument_count_ -= instr->argument_count();
+  LOperand* key = UseFixed(instr->key(), a2);
+  return MarkAsCall(DefineFixed(new LCallKeyed(key), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallNamed, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* constructor = UseFixed(instr->constructor(), a1);
+  argument_count_ -= instr->argument_count();
+  LCallNew* result = new LCallNew(constructor);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallFunction, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallRuntime, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineAsRegister(new LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), a1);
+    LOperand* right = UseFixed(instr->right(), a0);
+    LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else if (instr->representation().IsInteger32()) {
+    // TODO(1042): The fixed register allocation is needed because we call
+    // TypeRecordingBinaryOpStub from the generated code, which requires
+    // registers a0 and a1 to be used. We should remove this once a native
+    // implementation is provided.
+    LOperand* dividend = UseFixed(instr->left(), a0);
+    LOperand* divisor = UseFixed(instr->right(), a1);
+    return AssignEnvironment(AssignPointerMap(
+             DefineFixed(new LDivI(dividend, divisor), v0)));
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LModI* mod;
+    if (instr->HasPowerOf2Divisor()) {
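+      // A power-of-2 divisor lets the code generator reduce the modulus to
+      // a bitwise mask (e.g. x % 8 == x & 7 for non-negative x), so this
+      // variant of LModI needs no temp registers.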
+      ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+      LOperand* value = UseRegisterAtStart(instr->left());
+      mod = new LModI(value, UseOrConstant(instr->right()));
+    } else {
+      LOperand* dividend = UseRegister(instr->left());
+      LOperand* divisor = UseRegister(instr->right());
+      mod = new LModI(dividend,
+                      divisor,
+                      TempRegister(),
+                      FixedTemp(f20),
+                      FixedTemp(f22));
+    }
+
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+        instr->CheckFlag(HValue::kCanBeDivByZero)) {
+      return AssignEnvironment(DefineAsRegister(mod));
+    } else {
+      return DefineAsRegister(mod);
+    }
+  } else if (instr->representation().IsTagged()) {
+    return DoArithmeticT(Token::MOD, instr);
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    // We call a C function for the double modulo. It can't trigger a GC.
+    // We need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    LOperand* left = UseFixedDouble(instr->left(), f2);
+    LOperand* right = UseFixedDouble(instr->right(), f4);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, f2), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left;
+    LOperand* right = UseOrConstant(instr->MostConstantOperand());
+    LOperand* temp = NULL;
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+        (instr->CheckFlag(HValue::kCanOverflow) ||
+        !right->IsConstantOperand())) {
+      left = UseRegister(instr->LeastConstantOperand());
+      temp = TempRegister();
+    } else {
+      left = UseRegisterAtStart(instr->LeastConstantOperand());
+    }
+    return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new LSubI(left, right);
+    LInstruction* result = DefineAsRegister(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LAddI* add = new LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  ASSERT(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  ASSERT(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), f2);
+  LOperand* right = exponent_type.IsDouble() ?
+      UseFixedDouble(instr->right(), f4) :
+      UseFixed(instr->right(), a0);
+  LPower* result = new LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, f6),
+                    instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LCmpT* result = new LCmpT(left, right);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+    HCompareIDAndBranch* instr) {
+  Representation r = instr->GetInputRepresentation();
+  if (r.IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new LCmpIDAndBranch(left, right);
+  } else {
+    ASSERT(r.IsDouble());
+    ASSERT(instr->left()->representation().IsDouble());
+    ASSERT(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new LCmpIDAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+    HCompareConstantEqAndBranch* instr) {
+  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+                                      TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr)  {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
+  LOperand* array = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LJSArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
+    HFixedArrayBaseLength* instr) {
+  LOperand* array = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LFixedArrayBaseLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+  LOperand* object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LElementsKind(object));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+  LOperand* object = UseRegister(instr->value());
+  LValueOf* result = new LValueOf(object, TempRegister());
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+                                            UseRegister(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* value = UseFixed(instr->value(), a0);
+  return MarkAsCall(new LThrow(value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
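+  // Handled pairwise by (from, to): tagged->double untags a number,
+  // tagged->int32 untags a smi or deoptimizes, double->tagged allocates a
+  // heap number, double->int32 truncates, int32->tagged tags a smi or
+  // allocates a heap number, and int32->double converts directly.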
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(instr->value());
+      LNumberUntagD* res = new LNumberUntagD(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      bool needs_check = !instr->value()->type().IsSmi();
+      LInstruction* res = NULL;
+      if (!needs_check) {
+        res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+      } else {
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
+                                                      : NULL;
+        LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
+                                                      : NULL;
+        res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
+        res = AssignEnvironment(res);
+      }
+      return res;
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+
+      // Make sure that the temp and result_temp registers are
+      // different.
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
+      Define(result, result_temp);
+      return AssignPointerMap(result);
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      LDoubleToI* res =
+        new LDoubleToI(value,
+                       TempRegister(),
+                       instr->CanTruncateToInt32() ? TempRegister() : NULL);
+      return AssignEnvironment(DefineAsRegister(res));
+    }
+  } else if (from.IsInteger32()) {
+    if (to.IsTagged()) {
+      HValue* val = instr->value();
+      LOperand* value = UseRegister(val);
+      if (val->HasRange() && val->range()->IsInSmiRange()) {
+        return DefineSameAsFirst(new LSmiTag(value));
+      } else {
+        LNumberTagI* result = new LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      }
+    } else {
+      ASSERT(to.IsDouble());
+      LOperand* value = Use(instr->value());
+      return DefineAsRegister(new LInteger32ToDouble(value));
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckNonSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LCheckMap(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    // Revisit this decision, both here and for the tagged case below.
+    return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(f22)));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new LClampIToUint8(reg));
+  } else {
+    ASSERT(input_rep.IsTagged());
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve f22 explicitly.
+    LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(f22));
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
+    return AssignEnvironment(DefineAsRegister(res));
+  } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction
+    // in this case, since it is a no-op.
+    UNREACHABLE();
+    return NULL;
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    LOperand* temp3 = FixedTemp(f22);
+    LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
+    return AssignEnvironment(DefineSameAsFirst(res));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  return new LReturn(UseFixed(instr->value(), v0));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsInteger32()) {
+    return DefineAsRegister(new LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new LConstantD);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
+  return instr->RequiresHoleCheck()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), a0);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+  LOperand* temp = TempRegister();
+  LOperand* value = UseTempRegister(instr->value());
+  LInstruction* result = new LStoreGlobalCell(value, temp);
+  if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), a1);
+  LOperand* value = UseFixed(instr->value(), a0);
+  LStoreGlobalGeneric* result =
+      new LStoreGlobalGeneric(global_object, value);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  return new LStoreContextSlot(context, value);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  return DefineAsRegister(
+      new LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+    HLoadNamedFieldPolymorphic* instr) {
+  ASSERT(instr->representation().IsTagged());
+  if (instr->need_generic()) {
+    LOperand* obj = UseFixed(instr->object(), a0);
+    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  } else {
+    LOperand* obj = UseRegisterAtStart(instr->object());
+    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), a0);
+  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+    HLoadExternalArrayPointer* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadExternalArrayPointer(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+    HLoadKeyedFastElement* instr) {
+  ASSERT(instr->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+    HLoadKeyedFastDoubleElement* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* elements = UseTempRegister(instr->elements());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LLoadKeyedFastDoubleElement* result =
+      new LLoadKeyedFastDoubleElement(elements, key);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+    HLoadKeyedSpecializedArrayElement* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+  Representation representation(instr->representation());
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* external_pointer = UseRegister(instr->external_pointer());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LLoadKeyedSpecializedArrayElement* result =
+      new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+  LInstruction* load_instr = DefineAsRegister(result);
+  // An unsigned int array load can produce a value outside the int32 range
+  // and cause a deopt, so make sure it has an environment.
+  return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+      AssignEnvironment(load_instr) : load_instr;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), a1);
+  LOperand* key = UseFixed(instr->key(), a0);
+
+  LInstruction* result =
+      DefineFixed(new LLoadKeyedGeneric(object, key), v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+    HStoreKeyedFastElement* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
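+  // The write barrier clobbers the registers handed to it, so when one is
+  // needed the value and key are placed in temp registers (the object
+  // always is) instead of being shared with other uses.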
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+    HStoreKeyedFastDoubleElement* instr) {
+  ASSERT(instr->value()->representation().IsDouble());
+  ASSERT(instr->elements()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LOperand* val = UseTempRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+  return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+    HStoreKeyedSpecializedArrayElement* instr) {
+  Representation representation(instr->value()->representation());
+  ElementsKind elements_kind = instr->elements_kind();
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+  ASSERT(instr->external_pointer()->representation().IsExternal());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* external_pointer = UseRegister(instr->external_pointer());
+  bool val_is_temp_register =
+      elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+      elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+  LOperand* val = val_is_temp_register
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+
+  return new LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                key,
+                                                val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), a2);
+  LOperand* key = UseFixed(instr->key(), a1);
+  LOperand* val = UseFixed(instr->value(), a0);
+
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsTagged());
+  ASSERT(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
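+    // Going from smi-only to fast elements keeps the same backing store
+    // layout, so only the map word has to be rewritten; other transitions
+    // may need to convert the elements and take the runtime path below.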
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new LTransitionElementsKind(object, new_map_reg, NULL);
+    return DefineSameAsFirst(result);
+  } else {
+    LOperand* object = UseFixed(instr->object(), a0);
+    LOperand* fixed_object_reg = FixedTemp(a2);
+    LOperand* new_map_reg = FixedTemp(a3);
+    LTransitionElementsKind* result =
+        new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  return new LStoreNamedField(obj, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), a1);
+  LOperand* val = UseFixed(instr->value(), a0);
+
+  LInstruction* result = new LStoreNamedGeneric(obj, val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LStringCharFromCode* result = new LStringCharFromCode(char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+  LOperand* string = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LStringLength(string));
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LArrayLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LRegExpLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+  LOperand* object = UseFixed(instr->object(), a0);
+  LOperand* key = UseFixed(instr->key(), a1);
+  LDeleteProperty* result = new LDeleteProperty(object, key);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
+  if (spill_index > LUnallocated::kMaxFixedIndex) {
+    Abort("Too many spill slots needed for OSR");
+    spill_index = 0;
+  }
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallStub, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  LOperand* arguments = UseRegister(instr->arguments());
+  LOperand* length = UseTempRegister(instr->length());
+  LOperand* index = UseRegister(instr->index());
+  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), a0);
+  LToFastProperties* result = new LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LTypeof* result = new LTypeof(UseFixed(instr->value(), a0));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+    HIsConstructCallAndBranch* instr) {
+  return new LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  HEnvironment* env = current_block_->last_environment();
+  ASSERT(env != NULL);
+
+  env->set_ast_id(instr->ast_id());
+
+  env->Drop(instr->pop_count());
+  for (int i = 0; i < instr->values()->length(); ++i) {
+    HValue* value = instr->values()->at(i);
+    if (instr->HasAssignedIndexAt(i)) {
+      env->Bind(instr->GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+
+  // If there is an instruction pending a deoptimization environment, create
+  // a lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LInstruction* result = new LLazyBailout;
+    result = AssignEnvironment(result);
+    instruction_pending_deoptimization_environment_->
+        set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
+    return result;
+  }
+
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    return MarkAsCall(new LStackCheck, instr);
+  } else {
+    ASSERT(instr->is_backwards_branch());
+    return AssignEnvironment(AssignPointerMap(new LStackCheck));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->call_kind());
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment()->outer();
+  current_block_->UpdateEnvironment(outer);
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* object = UseRegisterAtStart(instr->object());
+  LIn* result = new LIn(key, object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index ebc1e43..71f0bb2 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -32,131 +32,300 @@
 #include "lithium-allocator.h"
 #include "lithium.h"
 #include "safepoint-table.h"
-
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 class LCodeGen;
-class LEnvironment;
-class Translation;
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
+  V(ControlInstruction)                         \
+  V(Call)                                       \
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
+  V(AccessArgumentsAt)                          \
+  V(AddI)                                       \
+  V(ApplyArguments)                             \
+  V(ArgumentsElements)                          \
+  V(ArgumentsLength)                            \
+  V(ArithmeticD)                                \
+  V(ArithmeticT)                                \
+  V(ArrayLiteral)                               \
+  V(BitI)                                       \
+  V(BitNotI)                                    \
+  V(BoundsCheck)                                \
+  V(Branch)                                     \
+  V(CallConstantFunction)                       \
+  V(CallFunction)                               \
+  V(CallGlobal)                                 \
+  V(CallKeyed)                                  \
+  V(CallKnownGlobal)                            \
+  V(CallNamed)                                  \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(CallStub)                                   \
+  V(CheckFunction)                              \
+  V(CheckInstanceType)                          \
+  V(CheckMap)                                   \
+  V(CheckNonSmi)                                \
+  V(CheckPrototypeMaps)                         \
+  V(CheckSmi)                                   \
+  V(ClampDToUint8)                              \
+  V(ClampIToUint8)                              \
+  V(ClampTToUint8)                              \
+  V(ClassOfTestAndBranch)                       \
+  V(CmpConstantEqAndBranch)                     \
+  V(CmpIDAndBranch)                             \
+  V(CmpObjectEqAndBranch)                       \
+  V(CmpMapAndBranch)                            \
+  V(CmpT)                                       \
+  V(ConstantD)                                  \
+  V(ConstantI)                                  \
+  V(ConstantT)                                  \
+  V(Context)                                    \
+  V(DeleteProperty)                             \
+  V(Deoptimize)                                 \
+  V(DivI)                                       \
+  V(DoubleToI)                                  \
+  V(ElementsKind)                               \
+  V(FixedArrayBaseLength)                       \
+  V(FunctionLiteral)                            \
+  V(GetCachedArrayIndex)                        \
+  V(GlobalObject)                               \
+  V(GlobalReceiver)                             \
+  V(Goto)                                       \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(HasInstanceTypeAndBranch)                   \
+  V(In)                                         \
+  V(InstanceOf)                                 \
+  V(InstanceOfKnownGlobal)                      \
+  V(InstructionGap)                             \
+  V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
+  V(IsConstructCallAndBranch)                   \
+  V(IsNilAndBranch)                             \
+  V(IsObjectAndBranch)                          \
+  V(IsSmiAndBranch)                             \
+  V(IsUndetectableAndBranch)                    \
+  V(JSArrayLength)                              \
+  V(Label)                                      \
+  V(LazyBailout)                                \
+  V(LoadContextSlot)                            \
+  V(LoadElements)                               \
+  V(LoadExternalArrayPointer)                   \
+  V(LoadFunctionPrototype)                      \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
+  V(LoadKeyedFastDoubleElement)                 \
+  V(LoadKeyedFastElement)                       \
+  V(LoadKeyedGeneric)                           \
+  V(LoadKeyedSpecializedArrayElement)           \
+  V(LoadNamedField)                             \
+  V(LoadNamedFieldPolymorphic)                  \
+  V(LoadNamedGeneric)                           \
+  V(ModI)                                       \
+  V(MulI)                                       \
+  V(NumberTagD)                                 \
+  V(NumberTagI)                                 \
+  V(NumberUntagD)                               \
+  V(ObjectLiteral)                              \
+  V(OsrEntry)                                   \
+  V(OuterContext)                               \
+  V(Parameter)                                  \
+  V(Power)                                      \
+  V(PushArgument)                               \
+  V(RegExpLiteral)                              \
+  V(Return)                                     \
+  V(ShiftI)                                     \
+  V(SmiTag)                                     \
+  V(SmiUntag)                                   \
+  V(StackCheck)                                 \
+  V(StoreContextSlot)                           \
+  V(StoreGlobalCell)                            \
+  V(StoreGlobalGeneric)                         \
+  V(StoreKeyedFastDoubleElement)                \
+  V(StoreKeyedFastElement)                      \
+  V(StoreKeyedGeneric)                          \
+  V(StoreKeyedSpecializedArrayElement)          \
+  V(StoreNamedField)                            \
+  V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
+  V(StringCharCodeAt)                           \
+  V(StringCharFromCode)                         \
+  V(StringLength)                               \
+  V(SubI)                                       \
+  V(TaggedToI)                                  \
+  V(ThisFunction)                               \
+  V(Throw)                                      \
+  V(ToFastProperties)                           \
+  V(TransitionElementsKind)                     \
+  V(Typeof)                                     \
+  V(TypeofIsAndBranch)                          \
+  V(UnaryMathOperation)                         \
+  V(UnknownOSRValue)                            \
+  V(ValueOf)
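+
+// The lists above follow the X-macro pattern: each user supplies its own
+// definition of V and instantiates the list with it, as the Opcode enum and
+// the Is##type() predicates in LInstruction do below.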
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const { return LInstruction::k##type; } \
+  virtual void CompileToNative(LCodeGen* generator);              \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
+  }
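+
+// As an illustration, DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") expands to
+// an opcode() override returning LInstruction::kGoto, a CompileToNative()
+// declaration, a Mnemonic() returning "goto", and a checked LGoto::cast().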
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
+
 
 class LInstruction: public ZoneObject {
  public:
-  LInstruction() { }
+  LInstruction()
+      :  environment_(NULL),
+         hydrogen_value_(NULL),
+         is_call_(false),
+         is_save_doubles_(false) { }
   virtual ~LInstruction() { }
 
-  // Predicates should be generated by macro as in lithium-ia32.h.
-  virtual bool IsLabel() const {
-    UNIMPLEMENTED();
-    return false;
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream) = 0;
+  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void set_deoptimization_environment(LEnvironment* env) {
+    deoptimization_environment_.set(env);
   }
-  virtual bool IsOsrEntry() const {
-    UNIMPLEMENTED();
-    return false;
+  LEnvironment* deoptimization_environment() const {
+    return deoptimization_environment_.get();
+  }
+  bool HasDeoptimizationEnvironment() const {
+    return deoptimization_environment_.is_set();
   }
 
-  LPointerMap* pointer_map() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool HasPointerMap() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
-
-  LEnvironment* environment() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool HasEnvironment() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
-
-  virtual bool IsControl() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  void MarkAsCall() { UNIMPLEMENTED(); }
-  void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
+  void MarkAsCall() { is_call_ = true; }
+  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
 
   // Interface to the register allocator and iterators.
-  bool IsMarkedAsCall() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  bool IsMarkedAsCall() const { return is_call_; }
+  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
 
-  bool IsMarkedAsSaveDoubles() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() = 0;
 
-  virtual bool HasResult() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
 
-  virtual LOperand* result() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  virtual int InputCount() {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  virtual LOperand* InputAt(int i) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  virtual int TempCount() {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  virtual LOperand* TempAt(int i) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LOperand* FirstInput() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LOperand* Output() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
 
 #ifdef DEBUG
-  void VerifyCall() { UNIMPLEMENTED(); }
+  void VerifyCall();
 #endif
+
+ private:
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  SetOncePointer<LEnvironment> deoptimization_environment_;
+  bool is_call_;
+  bool is_save_doubles_;
 };
 
 
-class LGap: public LInstruction {
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
  public:
-  explicit LGap(HBasicBlock* block) { }
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  virtual bool HasResult() const { return R != 0; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() { return results_[0]; }
 
-  HBasicBlock* block() const {
-    UNIMPLEMENTED();
-    return NULL;
+  int InputCount() { return I; }
+  LOperand* InputAt(int i) { return inputs_[i]; }
+
+  int TempCount() { return T; }
+  LOperand* TempAt(int i) { return temps_[i]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+};
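+
+// Reading the template parameters on a concrete class: LGap below is
+// LTemplateInstruction<0, 0, 0> (no result, inputs, or temps), while LMulI
+// below is LTemplateInstruction<1, 2, 1>: one result, two inputs, one temp.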
+
+
+class LGap: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
   }
 
+  // Can't use the DECLARE_CONCRETE_INSTRUCTION macro here because of the
+  // subclasses.
+  virtual bool IsGap() const { return true; }
+  virtual void PrintDataTo(StringStream* stream);
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
   enum InnerPosition {
     BEFORE,
     START,
@@ -166,141 +335,1885 @@
     LAST_INNER_POSITION = AFTER
   };
 
-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
-    UNIMPLEMENTED();
-    return NULL;
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+    return parallel_moves_[pos];
   }
 
   LParallelMove* GetParallelMove(InnerPosition pos)  {
-    UNIMPLEMENTED();
-    return NULL;
+    return parallel_moves_[pos];
   }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap: public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(int block_id) : block_id_(block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int block_id() const { return block_id_; }
+
+ private:
+  int block_id_;
+};
+
+
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
 };
 
 
 class LLabel: public LGap {
  public:
-  explicit LLabel(HBasicBlock* block) : LGap(block) { }
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
 };
 
 
-class LOsrEntry: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
  public:
-  // Function could be generated by a macro as in lithium-ia32.h.
-  static LOsrEntry* cast(LInstruction* instr) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
 
-  LOperand** SpilledRegisterArray() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-  LOperand** SpilledDoubleRegisterArray() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
 
-  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
-    UNIMPLEMENTED();
-  }
-  void MarkSpilledDoubleRegister(int allocation_index,
-                                 LOperand* spill_operand) {
-    UNIMPLEMENTED();
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+  TranscendentalCache::Type transcendental_type() {
+    return hydrogen()->transcendental_type();
   }
 };
 
 
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
+ public:
+  virtual bool IsControl() const { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+};
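+
+// By convention, the two-successor branch instructions treat successor 0 as
+// the true target and successor 1 as the false target, which is what
+// true_block_id() and false_block_id() above encode.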
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+ public:
+  LArgumentsElements() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LTemplateInstruction<1, 2, 3> {
+ public:
+  // Used when the right-hand side is a constant power of 2.
+  LModI(LOperand* left,
+        LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = NULL;
+    temps_[1] = NULL;
+    temps_[2] = NULL;
+  }
+
+  // Used for the standard case.
+  LModI(LOperand* left,
+        LOperand* right,
+        LOperand* temp1,
+        LOperand* temp2,
+        LOperand* temp3) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LDivI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LCmpIDAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->GetInputRepresentation().IsDouble();
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LUnaryMathOperation(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+  virtual void PrintDataTo(StringStream* stream);
+  BuiltinFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+                               "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpConstantEqAndBranch(LOperand* left) {
+    inputs_[0] = left;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+                               "cmp-constant-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+};
+
+
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LIsNilAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCmpT(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInstanceOf(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+                               "instance-of-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+};
+
+
+class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+
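+  // Shift operations can only deoptimize on a logical shift right (SHR) by
+  // zero whose result has the sign bit set, since such a result cannot be
+  // represented as a signed int32; the builder records that case here.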
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantD: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value() const { return hydrogen()->handle(); }
+};
+
+
+class LBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
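+  // Although this is not an LControlInstruction subclass, it behaves as a
+  // control instruction: it branches to the successors of the underlying
+  // HCompareMap, as reported by IsControl() below.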
+  virtual bool IsControl() const { return true; }
+
+  Handle<Map> map() const { return hydrogen()->map(); }
+  int true_block_id() const {
+    return hydrogen()->FirstSuccessor()->block_id();
+  }
+  int false_block_id() const {
+    return hydrogen()->SecondSuccessor()->block_id();
+  }
+};
+
+
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LJSArrayLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
+  DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+};
+
+
+class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LFixedArrayBaseLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
+                               "fixed-array-base-length")
+  DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
+};
+
+
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LElementsKind(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
+class LValueOf: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LValueOf(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+};
+
+
+class LThrow: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LThrow(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LBitNotI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LPower: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+
+  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+  Token::Value op() const { return op_; }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LReturn(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
+                               "load-named-field-polymorphic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+
+  LOperand* object() { return inputs_[0]; }
+};
+
+
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedGeneric(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+
+  LOperand* function() { return inputs_[0]; }
+};
+
+
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadElements(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadExternalArrayPointer(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+                               "load-external-array-pointer")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+                               "load-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+                                    LOperand* key) {
+    inputs_[0] = external_pointer;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+                               "load-keyed-specialized-array-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+  LOperand* external_pointer() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+};
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+  LStoreGlobalCell(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreGlobalGeneric(LOperand* global_object, LOperand* value) {
+    inputs_[0] = global_object;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+  LOperand* global_object() { return InputAt(0); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LOperand* value() { return InputAt(1); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  LOperand* context() { return InputAt(0); }
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  LOperand* context() { return InputAt(0); }
+  LOperand* value() { return InputAt(1); }
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LOuterContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalObject(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalReceiver(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+  LOperand* global() { return InputAt(0); }
+};
+
+
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<JSFunction> function() { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallKeyed(LOperand* key) {
+    inputs_[0] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 2; }
+};
+
+
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallNew(LOperand* constructor) {
+    inputs_[0] = constructor;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberTagI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
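+// Truncation applies when every use of the result tolerates a truncated
+// int32 (for example, inputs to bitwise operations), which is what
+// CanTruncateToInt32() on the hydrogen instruction reports.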
+class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+ public:
+  LTaggedToI(LOperand* value,
+             LOperand* temp1,
+             LOperand* temp2,
+             LOperand* temp3) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
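+  // True when the input is not statically known to be a smi, so the
+  // generated code must check the tag before untagging.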
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreNamedField(LOperand* obj, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool is_in_object() { return hydrogen()->is_in_object(); }
+  int offset() { return hydrogen()->offset(); }
+  Handle<Map> transition() const { return hydrogen()->transition(); }
+};
+
+
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreNamedGeneric(LOperand* obj, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+  bool strict_mode() { return strict_mode_flag() == kStrictMode; }
+};
+
+
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedFastDoubleElement(LOperand* elements,
+                               LOperand* key,
+                               LOperand* val) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+                               "store-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+                                     LOperand* key,
+                                     LOperand* val) {
+    inputs_[0] = external_pointer;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+                               "store-keyed-specialized-array-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+  LOperand* external_pointer() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+};
+
+
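+// Changes the map of object() from original_map() to transitioned_map(),
+// migrating the elements backing store when the element representation
+// changes (for example, from smi-only to double elements).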
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* new_map_temp,
+                          LOperand* temp_reg) {
+    inputs_[0] = object;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp_reg;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_reg() { return temps_[0]; }
+  LOperand* temp_reg() { return temps_[1]; }
+  Handle<Map> original_map() { return hydrogen()->original_map(); }
+  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+};
+
+
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharCodeAt(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+  LOperand* string() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LStringCharFromCode(LOperand* char_code) {
+    inputs_[0] = char_code;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+
+  LOperand* char_code() { return inputs_[0]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LStringLength(LOperand* string) {
+    inputs_[0] = string;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+  DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+  LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckFunction(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMap(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
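+// Checks that the maps on the prototype chain from prototype() to holder()
+// are unchanged. The chain is known statically, so the instruction needs
+// only temporaries and no inputs.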
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+ public:
+  LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+  Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+  Handle<JSObject> holder() const { return hydrogen()->holder(); }
+};
+
+
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+};
+
+
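+// The three Clamp*ToUint8 instructions below clamp their input to the range
+// [0, 255] (rounding doubles to the nearest integer), as required for
+// stores to external pixel arrays.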
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LTypeof(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+  explicit LIsConstructCallAndBranch(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+                               "is-construct-call-and-branch")
+};
+
+
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LDeleteProperty(LOperand* obj, LOperand* key) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry();
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+  LOperand** SpilledRegisterArray() { return register_spills_; }
+  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand);
+
+ private:
+  // Arrays of spill slot operands for registers with an assigned spill
+  // slot, i.e., that must also be restored to the spill slot on OSR entry.
+  // NULL if the register has no assigned spill slot.  Indexed by allocation
+  // index.
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
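+// Implements the JavaScript 'in' operator (key in object).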
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LIn(LOperand* key, LOperand* object) {
+    inputs_[0] = key;
+    inputs_[1] = object;
+  }
+
+  LOperand* key() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(In, "in")
+};
+
+
+class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
-  explicit LChunk(HGraph* graph) { }
+  explicit LChunk(CompilationInfo* info, HGraph* graph);
 
-  HGraph* graph() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
 
-  const ZoneList<LPointerMap*>* pointer_maps() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  int GetNextSpillIndex(bool is_double);
+  LOperand* GetNextSpillSlot(bool is_double);
 
-  LOperand* GetNextSpillSlot(bool double_slot) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LConstantOperand* DefineConstantOperand(HConstant* constant) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
+  int spill_slot_count() const { return spill_slot_count_; }
+  CompilationInfo* info() const { return info_; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  void MarkEmptyBlocks();
+  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
   LLabel* GetLabel(int block_id) const {
-    UNIMPLEMENTED();
-    return NULL;
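+    // The first instruction of every basic block is an LLabel, so the
+    // label can be recovered from the block's first instruction index.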
+    HBasicBlock* block = graph_->blocks()->at(block_id);
+    int first_instruction = block->first_instruction_index();
+    return LLabel::cast(instructions_[first_instruction]);
+  }
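+  // Follows the chain of label replacements created by MarkEmptyBlocks to
+  // find the block that will actually be emitted.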
+  int LookupDestination(int block_id) const {
+    LLabel* cur = GetLabel(block_id);
+    while (cur->replacement() != NULL) {
+      cur = cur->replacement();
+    }
+    return cur->block_id();
+  }
+  Label* GetAssemblyLabel(int block_id) const {
+    LLabel* label = GetLabel(block_id);
+    ASSERT(!label->HasReplacement());
+    return label->label();
   }
 
-  const ZoneList<LInstruction*>* instructions() const {
-    UNIMPLEMENTED();
-    return NULL;
+  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+    return &inlined_closures_;
   }
 
-  int GetParameterStackSlot(int index) const {
-    UNIMPLEMENTED();
-    return 0;
+  void AddInlinedClosure(Handle<JSFunction> closure) {
+    inlined_closures_.Add(closure);
   }
 
-  void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
-
-  LGap* GetGapAt(int index) const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool IsGapAt(int index) const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  int NearestGapPos(int index) const {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  void MarkEmptyBlocks() { UNIMPLEMENTED(); }
-
-  CompilationInfo* info() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-#ifdef DEBUG
-  void Verify() { UNIMPLEMENTED(); }
-#endif
+ private:
+  int spill_slot_count_;
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  ZoneList<LInstruction*> instructions_;
+  ZoneList<LPointerMap*> pointer_maps_;
+  ZoneList<Handle<JSFunction> > inlined_closures_;
 };
 
 
 class LChunkBuilder BASE_EMBEDDED {
  public:
-  LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : chunk_(NULL),
+        info_(info),
+        graph_(graph),
+        status_(UNUSED),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        argument_count_(0),
+        allocator_(allocator),
+        position_(RelocInfo::kNoPosition),
+        instruction_pending_deoptimization_environment_(NULL),
+        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
 
   // Build the sequence for the graph.
-  LChunk* Build() {
-    UNIMPLEMENTED();
-    return NULL;
-  };
+  LChunk* Build();
 
   // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
-    UNIMPLEMENTED(); \
-    return NULL; \
-  }
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
   HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
 #undef DECLARE_DO
 
+ private:
+  enum Status {
+    UNUSED,
+    BUILDING,
+    DONE,
+    ABORTED
+  };
+
+  LChunk* chunk() const { return chunk_; }
+  CompilationInfo* info() const { return info_; }
+  HGraph* graph() const { return graph_; }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  void Abort(const char* format, ...);
+
+  // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction, so the register allocator will not reuse its
+  // register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the start of the instruction, so the register allocator is free to
+  // assign the same register to some other operand used inside the
+  // instruction (i.e. a temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+                           LUnallocated* result);
+  template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+                                    int index);
+  template<int I, int T>
+      LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+                                Register reg);
+  template<int I, int T>
+      LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+                                      DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
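+
+  // A hypothetical sketch (illustrative only, not part of this interface) of
+  // how the Use* and Define* helpers combine in a typical Do* method for an
+  // integer add:
+  //
+  //   LOperand* left = UseRegisterAtStart(instr->left());
+  //   LOperand* right = UseOrConstantAtStart(instr->right());
+  //   LInstruction* result = DefineAsRegister(new LAddI(left, right));
+  //   if (instr->CheckFlag(HValue::kCanOverflow)) {
+  //     result = AssignEnvironment(result);
+  //   }
+  //   return result;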
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly, so we do not attach an environment to such
+  // instructions.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
+
+  void VisitInstruction(HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+
+  LChunk* chunk_;
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  Status status_;
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  int argument_count_;
+  LAllocator* allocator_;
+  int position_;
+  LInstruction* instruction_pending_deoptimization_environment_;
+  int pending_deoptimization_ast_id_;
+
   DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
 };
 
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 32dce66..c9f4d4b 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -243,8 +243,7 @@
 
   if (smi_check == INLINE_SMI_CHECK) {
     ASSERT_EQ(0, kSmiTag);
-    And(t8, value, Operand(kSmiTagMask));
-    Branch(&done, eq, t8, Operand(zero_reg));
+    JumpIfSmi(value, &done);
   }
 
   CheckPageFlag(value,
@@ -3610,7 +3609,7 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     CallKind call_kind) {
@@ -3618,7 +3617,7 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  li(a1, Operand(Handle<JSFunction>(function)));
+  li(a1, Operand(function));
   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
   ParameterCount expected(function->shared()->formal_parameter_count());
@@ -3739,45 +3738,19 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
-                                         Register r1, const Operand& r2) {
-  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
-      kNoASTId, cond, r1, r2);
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub) {
   ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
-                                             Condition cond,
-                                             Register r1,
-                                             const Operand& r2) {
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
-  return result;
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+                                              int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -3848,11 +3821,10 @@
   Ret();
 
   bind(&promote_scheduled_exception);
-  MaybeObject* result = TryTailCallExternalReference(
-      ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+      0,
+      1);
 
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
@@ -3865,8 +3837,6 @@
       1);
   mov(v0, s0);
   jmp(&leave_exit_frame);
-
-  return result;
 }
 
 
@@ -4089,17 +4059,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  li(a0, num_arguments);
-  return TryJumpToExternalReference(ext);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -4116,14 +4075,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& builtin) {
-  li(a1, Operand(builtin));
-  CEntryStub stub(1);
-  return TryTailCallStub(&stub);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
@@ -4528,8 +4479,7 @@
   STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(1, kSmiTagMask);
   or_(at, reg1, reg2);
-  andi(at, at, kSmiTagMask);
-  Branch(on_not_both_smi, ne, at, Operand(zero_reg));
+  JumpIfNotSmi(at, on_not_both_smi);
 }
 
 
@@ -4540,8 +4490,7 @@
   ASSERT_EQ(1, kSmiTagMask);
   // Both Smi tags must be 1 (not Smi).
   and_(at, reg1, reg2);
-  andi(at, at, kSmiTagMask);
-  Branch(on_either_smi, eq, at, Operand(zero_reg));
+  JumpIfSmi(at, on_either_smi);
 }
 
 
@@ -4619,8 +4568,7 @@
   // Check that neither is a smi.
   STATIC_ASSERT(kSmiTag == 0);
   And(scratch1, first, Operand(second));
-  And(scratch1, scratch1, Operand(kSmiTagMask));
-  Branch(failure, eq, scratch1, Operand(zero_reg));
+  JumpIfSmi(scratch1, failure);
   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                              second,
                                              scratch1,
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 84c55f7..b9bd2f3 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -812,7 +812,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       CallKind call_kind);
@@ -1042,27 +1042,9 @@
   void CallStub(CodeStub* stub, Condition cond = cc_always,
                 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
-                                           Condition cond = cc_always,
-                                           Register r1 = zero_reg,
-                                           const Operand& r2 =
-                                               Operand(zero_reg));
-
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
-                                               Condition cond = cc_always,
-                                               Register r1 = zero_reg,
-                                               const Operand& r2 =
-                                                   Operand(zero_reg));
-
   void CallJSExitStub(CodeStub* stub);
 
   // Call a runtime routine.
@@ -1083,12 +1065,6 @@
                                  int num_arguments,
                                  int result_size);
 
-  // Tail call of a runtime routine (jump). Try to generate the code if
-  // necessary. Do not perform a GC but instead return a retry after GC
-  // failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
@@ -1139,16 +1115,15 @@
   void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
   void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
 
-  // Calls an API function. Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions. Restores context.
-  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
-                                           int stack_space);
+  // Calls an API function.  Allocates a HandleScope, extracts the returned
+  // value from the handle, and propagates exceptions.  Restores the context.
+  // stack_space is the space to be unwound on exit (it includes the call JS
+  // arguments space and the additional space allocated for the fast call).
+  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
 
   // Jump to the builtin routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 296f186..2d60693 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -151,66 +151,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateDictionaryNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss_label,
-    Register receiver,
-    String* name,
-    Register scratch0,
-    Register scratch1) {
-  ASSERT(name->IsSymbol());
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  Label done;
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  Register map = scratch1;
-  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
-  __ Branch(miss_label, ne, at, Operand(zero_reg));
-
-
-  // Check that receiver is a JSObject.
-  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-  // Load properties array.
-  Register properties = scratch0;
-  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  // Check that the properties array is a dictionary.
-  __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
-  Register tmp = properties;
-  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
-  __ Branch(miss_label, ne, map, Operand(tmp));
-
-  // Restore the temporarily used register.
-  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  MaybeObject* result = StringDictionaryLookupStub::TryGenerateNegativeLookup(
-      masm,
-      miss_label,
-      &done,
-      receiver,
-      properties,
-      name,
-      scratch1);
-  if (result->IsFailure()) return result;
-
-  __ bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  return result;
-}
-
-
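
The deleted TryGenerateDictionaryNegativeLookup was the raw-pointer twin of
the handlified negative-lookup generator: it proves a name is absent from a
dictionary-mode receiver without entering the runtime. The emitted check,
reduced to C++ for orientation. This is a sketch only; QualifiesForNegativeLookup
is a hypothetical helper, while the mask constant is verbatim from the
deleted code:

  const int kInterceptorOrAccessCheckNeededMask =
      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);

  // A receiver qualifies only if its map has no named interceptor, needs no
  // access check, and the object is a real JSObject.
  bool QualifiesForNegativeLookup(uint8_t map_bit_field, int instance_type) {
    return (map_bit_field & kInterceptorOrAccessCheckNeededMask) == 0 &&
           instance_type >= FIRST_SPEC_OBJECT_TYPE;
  }
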
 void StubCache::GenerateProbe(MacroAssembler* masm,
                               Code::Flags flags,
                               Register receiver,
@@ -294,7 +234,10 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -302,8 +245,8 @@
   __ li(at, isolate->global());
   __ Branch(miss, ne, prototype, Operand(at));
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(isolate->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ li(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -339,8 +282,7 @@
                                            Register scratch,
                                            Label* miss_label) {
   // Check that the receiver isn't a smi.
-  __ And(scratch, receiver, Operand(kSmiTagMask));
-  __ Branch(miss_label, eq, scratch, Operand(zero_reg));
+  __ JumpIfSmi(receiver, miss_label);
 
   // Check that the object is a JS array.
   __ GetObjectType(receiver, scratch, scratch);
@@ -565,23 +507,24 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
   Register scratch = name;
-  __ li(scratch, Operand(Handle<Object>(interceptor)));
+  __ li(scratch, Operand(interceptor));
   __ Push(scratch, receiver, holder);
   __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
   __ push(scratch);
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -617,10 +560,9 @@
 }
 
 
-static MaybeObject* GenerateFastApiDirectCall(
-    MacroAssembler* masm,
-    const CallOptimization& optimization,
-    int argc) {
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
+                                      const CallOptimization& optimization,
+                                      int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
   //  -- sp[4]              : callee js function
@@ -631,18 +573,18 @@
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ li(t1, Operand(Handle<JSFunction>(function)));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ li(t1, Operand(function));
   __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ li(a0, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ li(a0, api_call_info);
     __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
   } else {
-    __ li(t2, Operand(Handle<Object>(call_data)));
+    __ li(t2, call_data);
   }
 
   // Store js function and call data.
@@ -653,10 +595,6 @@
   // (refer to layout above).
   __ Addu(a2, sp, Operand(2 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
   const int kApiStackSpace = 4;
 
   FrameScope frame_scope(masm, StackFrame::MANUAL);
@@ -682,17 +620,15 @@
   // v8::Arguments::is_construct_call = 0
   __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated). Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
   const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_API_CALL,
                         masm->isolate());
   AllowExternalCallThatCantCauseGC scope(masm);
-  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
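
GenerateFastApiDirectCall now returns void and resolves the callback address
late, from the handlified CallHandlerInfo. The unwind amount handed to
CallApiFunctionAndReturn follows straight from the stack layout in the state
comment: argc JS arguments, the extra fast-call slots, plus the receiver. A
sketch of the accounting, assuming kFastApiCallArguments is 4 to match the
four extra slots implied by "sp[(argc + 4) * 4] : receiver"; StackUnwindSpace
is a hypothetical helper for clarity:

  const int kFastApiCallArguments = 4;  // assumed: holder, callee, call data,
                                        // plus one more slot per the layout

  int StackUnwindSpace(int argc) {
    return argc + kFastApiCallArguments + 1;  // +1 for the receiver slot
  }
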
 
 class CallInterceptorCompiler BASE_EMBEDDED {
@@ -706,86 +642,63 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
     __ JumpIfSmi(receiver, miss);
-
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
-
     Counters* counters = masm->isolate()->counters();
-
     int depth1 = kInvalidProtoDepth;
     int depth2 = kInvalidProtoDepth;
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
-        !lookup->holder()->IsGlobalObject()) {
-      depth1 =
-          optimization.GetPrototypeDepthOfExpectedType(object,
-                                                      interceptor_holder);
+          !lookup->holder()->IsGlobalObject()) {
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 =
-            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                        lookup->holder());
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
       }
-      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                             (depth2 != kInvalidProtoDepth);
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     __ IncrementCounter(counters->call_const_interceptor(), 1,
-                      scratch1, scratch2);
+                        scratch1, scratch2);
 
     if (can_do_fast_api_call) {
       __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -798,9 +711,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-      stub_compiler_->CheckPrototypes(object, receiver,
-                                      interceptor_holder, scratch1,
-                                      scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -813,10 +726,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -827,10 +741,7 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result = GenerateFastApiDirectCall(masm,
-                                                      optimization,
-                                                      arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
@@ -851,68 +762,57 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
-
-    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
     // Call a runtime function to load the interceptor property.
     FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     __ CallExternalReference(
           ExternalReference(
               IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
               masm->isolate()),
           5);
-
     // Restore the name_ register.
     __ pop(name_);
-
     // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
 
       __ Push(holder, name_);
-
       CompileCallLoadPropertyWithInterceptor(masm,
                                              receiver,
                                              holder,
                                              name_,
                                              holder_obj);
-
       __ pop(name_);  // Restore the name.
       __ pop(receiver);  // Restore the holder.
     }
-
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
     __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
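
LoadWithInterceptor leans on a sentinel protocol: the runtime call for the
interceptor returns a dedicated root object when it produced no value, and
the stub branches on that root to choose between the interceptor's result
and the cached constant function. A hedged sketch of the convention, where
CallInterceptorRuntime stands in for the runtime call made above and the
root accessor name is an assumption:

  Object* result = CallInterceptorRuntime(receiver, holder, name);
  if (result == heap->no_interceptor_result_sentinel()) {
    // Interceptor declined: fall through to the constant function.
  } else {
    // Interceptor supplied the property value: return it directly.
  }
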
@@ -945,29 +845,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
-  ASSERT(cell->value()->IsTheHole());
-  __ li(scratch, Operand(Handle<Object>(cell)));
-  __ lw(scratch,
-        FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  __ Branch(miss, ne, scratch, Operand(at));
-  return cell;
-}
-
-
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
 static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -990,34 +867,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
-    if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = TryGenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
-    }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
-  }
-  return NULL;
-}
-
-
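
The raw-pointer walk above is superseded by the handlified
GenerateCheckPropertyCells referenced in the comment that follows. For
orientation, the walk looks roughly like this under handles; the exact
signature of the handlified helper is an assumption, since this patch does
not show it:

  Handle<JSObject> current = object;
  while (!current.is_identical_to(holder)) {
    if (current->IsGlobalObject()) {
      // Each global on the chain must still hold the hole in its property
      // cell for |name|, i.e. the property is still absent.
      GenerateCheckPropertyCell(masm, Handle<GlobalObject>::cast(current),
                                name, scratch, miss);
    }
    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
  }
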
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If FPU is available use it for conversion.
+// Convert the int passed in register ival to an IEEE 754 single-precision
+// floating-point value and store it at memory location (dst + 4 * wordoffset).
+// If FPU is available, use it for the conversion.
@@ -1240,146 +1089,6 @@
 }
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
-                                       Register object_reg,
-                                       JSObject* holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       String* name,
-                                       int save_at_depth,
-                                       Label* miss) {
-  // Make sure there's no overlap between holder and object registers.
-  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
-  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
-         && !scratch2.is(scratch1));
-
-  // Keep track of the current object in register reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  if (save_at_depth == depth) {
-    __ sw(reg, MemOperand(sp));
-  }
-
-  // Check the maps in the prototype chain.
-  // Traverse the prototype chain from the object and do map checks.
-  JSObject* current = object;
-  while (current != holder) {
-    depth++;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
-    ASSERT(current->GetPrototype()->IsJSObject());
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
-    if (!current->HasFastProperties() &&
-        !current->IsJSGlobalObject() &&
-        !current->IsJSGlobalProxy()) {
-      if (!name->IsSymbol()) {
-        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
-        Object* lookup_result = NULL;  // Initialization to please compiler.
-        if (!maybe_lookup_result->ToObject(&lookup_result)) {
-          set_failure(Failure::cast(maybe_lookup_result));
-          return reg;
-        }
-        name = String::cast(lookup_result);
-      }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
-             StringDictionary::kNotFound);
-
-      MaybeObject* negative_lookup =
-          TryGenerateDictionaryNegativeLookup(masm(),
-                                              miss,
-                                              reg,
-                                              name,
-                                              scratch1,
-                                              scratch2);
-
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
-
-      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now the object is in holder_reg.
-      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-
-      // Branch on the result of the map check.
-      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-        // Restore scratch register to be the map of the object.  In the
-        // new space case below, we load the prototype from the map in
-        // the scratch register.
-        __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-
-      reg = holder_reg;  // From now the object is in holder_reg.
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
-      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      // Check the map of the current object.
-      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      // Branch on the result of the map check.
-      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-      }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // From now the object is in holder_reg.
-      __ li(reg, Operand(Handle<JSObject>(prototype)));
-    }
-
-    if (save_at_depth == depth) {
-      __ sw(reg, MemOperand(sp));
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-  }
-
-  // Check the holder map.
-  __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-  __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-
-  // Log the check depth.
-  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-  // Perform security check for access to the global object.
-  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-  if (holder->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
-
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-
-  MaybeObject* result = TryGenerateCheckPropertyCells(masm(),
-                                                      object,
-                                                      holder,
-                                                      name,
-                                                      scratch1,
-                                                      miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
-
-  // Return the register containing the holder.
-  return reg;
-}
-
-
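
The deleted overload above was the last raw-pointer copy of the prototype
walk; its three per-link cases survive unchanged in the handlified version:
dictionary-mode links get a negative lookup, new-space prototypes are loaded
from the map at runtime, and old-space prototypes are embedded as immediates.
The embedding rule in miniature, as an illustrative sketch only:

  if (heap->InNewSpace(*prototype)) {
    // The prototype may move under GC: load it from the holder's map at
    // runtime instead of baking its address into the code object.
    __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
  } else {
    // Old-space objects do not move, so the pointer can be embedded.
    __ li(reg, Operand(prototype));
  }
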
 void StubCompiler::GenerateLoadField(Handle<JSObject> object,
                                      Handle<JSObject> holder,
                                      Register receiver,
@@ -1390,8 +1099,7 @@
                                      Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
-  __ And(scratch1, receiver, Operand(kSmiTagMask));
-  __ Branch(miss, eq, scratch1, Operand(zero_reg));
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
   Register reg = CheckPrototypes(
@@ -1424,49 +1132,43 @@
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss, scratch1);
 
   // Check that the maps haven't changed.
-  Register reg =
-    CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                    name, miss);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Build AccessorInfo::args_ list on the stack and push property name below
   // the exit frame to make GC aware of them and store pointers to them.
   __ push(receiver);
   __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
-  Handle<AccessorInfo> callback_handle(callback);
-  if (heap()->InNewSpace(callback_handle->data())) {
-    __ li(scratch3, callback_handle);
+  if (heap()->InNewSpace(callback->data())) {
+    __ li(scratch3, callback);
     __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
   } else {
-    __ li(scratch3, Handle<Object>(callback_handle->data()));
+    __ li(scratch3, Handle<Object>(callback->data()));
   }
   __ Push(reg, scratch3, name_reg);
   __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
   __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>
 
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
-  // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
-  // struct from the function (which is currently the case). This means we pass
-  // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
-  // will handle setting up a0.
+  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+  // struct from the function (which is currently the case). This means we pass
+  // the arguments in a1-a2 instead of a0-a1. CallApiFunctionAndReturn
+  // will handle setting up a0.
 
   const int kApiStackSpace = 1;
-
   FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
@@ -1476,28 +1178,26 @@
   // a2 (second argument - see note above) = AccessorInfo&
   __ Addu(a2, sp, kPointerSize);
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
+  const int kStackUnwindSpace = 4;
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_GETTER_CALL,
                         masm()->isolate());
-  // 4 args - will be freed later by LeaveExitFrame.
-  return masm()->TryCallApiFunctionAndReturn(ref, 4);
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
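
The constant 4 passed as kStackUnwindSpace is not arbitrary: it matches the
four words pushed before entering the exit frame, which the epilogue then
drops. A sketch of the bookkeeping, with slot names taken from the pushes
above:

  // Pushed, in order:          unwound on return:
  //   push(receiver)             1
  //   Push(reg,                  2  (holder register)
  //        scratch3,             3  (callback data)
  //        name_reg)             4  (property name)
  const int kStackUnwindSpace = 4;
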
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1513,9 +1213,9 @@
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+        lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1532,14 +1232,12 @@
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
       if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
         // CALLBACKS case needs a receiver to be passed into C++ callback.
         __ Push(receiver, holder_reg, name_reg);
       } else {
         __ Push(holder_reg, name_reg);
       }
-
-      // Invoke an interceptor.  Note: map checks from receiver to
-      // interceptor's holder has been compiled before (see a caller
-      // of this method).
+      // Invoke an interceptor.  Note: map checks from receiver to
+      // interceptor's holder have been compiled before (see a caller
+      // of this method).
@@ -1548,7 +1246,6 @@
                                              holder_reg,
                                              name_reg,
                                              interceptor_holder);
-
       // Check if interceptor provided a value for property.  If it's
       // the case, return immediately.
       Label interceptor_failed;
@@ -1563,16 +1260,14 @@
       if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
         __ pop(receiver);
       }
-
       // Leave the internal frame.
     }
-
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1591,15 +1286,14 @@
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
       // Important invariant in CALLBACKS case: the code above must be
       // structured to never clobber |receiver| register.
-      __ li(scratch2, Handle<AccessorInfo>(callback));
+      __ li(scratch2, callback);
       // holder_reg is either receiver or scratch1.
       if (!receiver.is(holder_reg)) {
         ASSERT(scratch1.is(holder_reg));
@@ -1642,9 +1336,9 @@
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1657,7 +1351,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(a0, miss);
   }
 
@@ -1666,15 +1360,16 @@
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
-  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ li(a3, Operand(cell));
   __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(function)) {
+  if (heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1689,7 +1384,7 @@
     __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ Branch(miss, ne, t0, Operand(a3));
   } else {
-    __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+    __ Branch(miss, ne, a1, Operand(function));
   }
 }
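
The branch on InNewSpace follows the same embedding rule as the prototype
walk: a function allocated in new space may move, so the stub compares the
GC-stable SharedFunctionInfo instead, which as the comment notes also keeps
one stub valid for every closure built from that shared info. In outline, a
sketch rather than the emitted code; branch_if_not_equal is a hypothetical
shorthand for the miss branches emitted above:

  if (heap->InNewSpace(*function)) {
    // Compare shared function info: stable across GC, shared by closures.
    branch_if_not_equal(cell_value->shared(), expected_shared_info);
  } else {
    // Old-space function: the embedded pointer itself can be compared.
    branch_if_not_equal(cell_value, *function);
  }
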
 
@@ -1703,20 +1398,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGenerateMissBranch() {
-  MaybeObject* maybe_obj =
-      isolate()->stub_cache()->TryComputeCallMiss(arguments().immediate(),
-                                                  kind_,
-                                                  extra_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
-}
-
-
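
TryGenerateMissBranch was the last miss-path helper threading MaybeObject*
failures outward. Its handlified replacement presumably reduces to computing
the miss stub through the (now allocating) stub cache and jumping to it; a
sketch under that assumption:

  void CallStubCompiler::GenerateMissBranch() {
    Handle<Code> code =
        isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                 kind_,
                                                 extra_state_);
    __ Jump(code, RelocInfo::CODE_TARGET);
  }
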
 Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
                                                 Handle<JSObject> holder,
                                                 int index,
@@ -1751,11 +1432,12 @@
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1765,11 +1447,11 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss;
 
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   Register receiver = a1;
 
@@ -1781,8 +1463,8 @@
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object), receiver,
-                  holder, a3, v0, t0, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
+                  name, &miss);
 
   if (argc == 0) {
     // Nothing to do, just return the length.
@@ -1791,10 +1473,8 @@
     __ Ret();
   } else {
     Label call_builtin;
-
     Register elements = a3;
     Register end_elements = t1;
-
     // Get the elements array of the object.
     __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
 
@@ -1935,19 +1615,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
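
Note the changed bail-out convention for all of these custom call compilers:
where the MaybeObject* versions returned heap()->undefined_value() to mean
"no specialized stub here, use the generic path", the handlified versions
return Handle<Code>::null(). The caller-side test, as it appears in
CompileCallConstant further down:

  Handle<Code> code = CompileCustomCall(object, holder,
                                        Handle<JSGlobalPropertyCell>::null(),
                                        function, name);
  // A null handle means bail out to the regular compiler code below.
  if (!code.is_null()) return code;
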
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1957,25 +1637,22 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss, return_undefined, call_builtin;
-
   Register receiver = a1;
   Register elements = a3;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object),
-                  receiver, holder, elements, t0, v0, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+                  t0, v0, name, &miss);
 
   // Get the elements array of the object.
   __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -2024,20 +1701,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2047,10 +1723,9 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
@@ -2063,16 +1738,16 @@
     index_out_of_range_label = &miss;
   }
 
-  GenerateNameCheck(Handle<String>(name), &name_miss);
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             v0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
-                  a1, a3, t0, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  v0, holder, a1, a3, t0, name, &miss);
 
   Register receiver = a1;
   Register index = t1;
@@ -2085,20 +1760,20 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      scratch,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -2109,22 +1784,21 @@
 
   __ bind(&miss);
   // Restore function name in a2.
-  __ li(a2, Handle<String>(name));
+  __ li(a2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2134,31 +1808,28 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
       (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
-  GenerateNameCheck(Handle<String>(name), &name_miss);
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             v0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
-                  a1, a3, t0, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  v0, holder, a1, a3, t0, name, &miss);
 
   Register receiver = v0;
   Register index = t1;
@@ -2172,21 +1843,21 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -2197,22 +1868,21 @@
 
   __ bind(&miss);
   // Restore function name in a2.
-  __ li(a2, Handle<String>(name));
+  __ li(a2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2225,22 +1895,23 @@
 
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
 
-    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2256,13 +1927,13 @@
   // Convert the smi code to uint16.
   __ And(code, code, Operand(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, v0);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, v0);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
@@ -2271,19 +1942,19 @@
 
   __ bind(&miss);
   // a2: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
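
Throughout these compilers the cell parameter doubles as a mode switch: a
null cell means an ordinary receiver-based call, checked through the
receiver's prototype chain, while a non-null cell means a contextual call on
a global, checked via the global receiver with the function reloaded from
its property cell. The return value mirrors the split:

  // Receiver-based call: the stub is keyed on the constant function.
  // Contextual global call: a NORMAL stub keyed on the property name.
  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
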
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2292,30 +1963,29 @@
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
 
-  if (!CpuFeatures::IsSupported(FPU))
-    return heap()->undefined_value();
+  if (!CpuFeatures::IsSupported(FPU)) {
+    return Handle<Code>::null();
+  }
+
   CpuFeatures::Scope scope_fpu(FPU);
-
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss, slow;
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2405,19 +2075,19 @@
 
   __ bind(&miss);
   // a2: function name.
-  MaybeObject* obj = TryGenerateMissBranch();
-  if (obj->IsFailure()) return obj;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2427,25 +2097,23 @@
   // -----------------------------------
 
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
-  GenerateNameCheck(Handle<String>(name), &miss);
 
-  if (cell == NULL) {
+  GenerateNameCheck(name, &miss);
+  if (cell.is_null()) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2507,37 +2175,36 @@
 
   __ bind(&miss);
   // a2: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
 
   Counters* counters = isolate()->counters();
 
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
 
-  GenerateNameCheck(Handle<String>(name), &miss_before_stack_reserved);
+  GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -2552,45 +2219,42 @@
   ReserveSpaceForFastApiCall(masm(), a0);
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+  CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
                   depth, &miss);
 
-  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiDirectCall(masm(), optimization, argc);
 
   __ bind(&miss);
   FreeSpaceForFastApiCall(masm());
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // Undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
 
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -2598,23 +2262,20 @@
 
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
-    __ And(t1, a1, Operand(kSmiTagMask));
-    __ Branch(&miss, eq, t1, Operand(zero_reg));
+    __ JumpIfSmi(a1, &miss);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(masm()->isolate()->counters()->call_const(),
           1, a0, a3);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
-                      &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+                      name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2625,50 +2286,46 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         // Check that the object is a two-byte string or a symbol.
         __ GetObjectType(a1, a3, a3);
         __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
-                        a1, t0, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            a0, holder, a3, a1, t0, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
       Label fast;
         // Check that the object is a smi or a heap number.
-        __ And(t1, a1, Operand(kSmiTagMask));
-        __ Branch(&fast, eq, t1, Operand(zero_reg));
+        __ JumpIfSmi(a1, &fast);
         __ GetObjectType(a1, a0, a0);
         __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
-                        a1, t0, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            a0, holder, a3, a1, t0, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ LoadRoot(t0, Heap::kTrueValueRootIndex);
@@ -2679,16 +2336,17 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
-                        a1, t0, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            a0, holder, a3, a1, t0, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
     }
 
-    default:
-      UNREACHABLE();
-  }
-
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
@@ -2697,17 +2355,16 @@
   // Handle call cache miss.
   __ bind(&miss);
 
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
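
Several hunks in this file also replace the open-coded smi test -- __ And(t1, a1, Operand(kSmiTagMask)) followed by __ Branch(&miss, eq, t1, Operand(zero_reg)) -- with the JumpIfSmi macro. Both forms compute the same predicate: V8 tags small integers with a zero low bit (the STATIC_ASSERT(kSmiTag == 0) above relies on this), so testing the tag bit distinguishes smis from heap pointers. A minimal standalone sketch of that predicate, assuming the 32-bit layout where smis are shifted left by one (not part of this patch):

#include <cassert>
#include <cstdint>

const intptr_t kSmiTag = 0;      // tag value of a small integer
const intptr_t kSmiTagMask = 1;  // mask selecting the low tag bit

// The predicate JumpIfSmi branches on: tag bit clear means smi.
bool IsSmi(intptr_t tagged) {
  return (tagged & kSmiTagMask) == kSmiTag;
}

intptr_t TagSmi(intptr_t value) { return value << 1; }  // low bit stays 0

int main() {
  assert(IsSmi(TagSmi(42)));
  assert(!IsSmi(TagSmi(42) | 1));  // heap pointers carry a set low bit
  return 0;
}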
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -2715,11 +2372,10 @@
 
   Label miss;
 
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
@@ -2727,64 +2383,47 @@
   __ lw(a1, MemOperand(sp, argc * kPointerSize));
 
   CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         a1,
-                                         a3,
-                                         t0,
-                                         a0,
-                                         &miss);
-  if (result->IsFailure()) {
-    return result;
-  }
+  compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
+                   &miss);
 
   // Move returned value, the function to call, to a1.
   __ mov(a1, v0);
   // Restore receiver.
   __ lw(a0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), Handle<Object>(object), arguments(), &miss,
-                       extra_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // Undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
@@ -2801,7 +2440,6 @@
   // Jump to the cached code (tail call).
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
@@ -2816,11 +2454,10 @@
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(NORMAL, name);
+  return GetCode(NORMAL, name);
 }
 
 
@@ -3053,29 +2690,24 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
   Label miss;
-
-  MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
-                                             callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(CALLBACKS, name);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -3099,9 +2731,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -3112,21 +2744,13 @@
 
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(object,
-                          holder,
-                          &lookup,
-                          a0,
-                          a2,
-                          a3,
-                          a1,
-                          t0,
-                          name,
+  GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -3147,8 +2771,7 @@
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
   if (!object.is_identical_to(holder)) {
-    __ And(t0, a0, Operand(kSmiTagMask));
-    __ Branch(&miss, eq, t0, Operand(zero_reg));
+    __ JumpIfSmi(a0, &miss);
   }
 
   // Check that the map of the global has not changed.
@@ -3200,11 +2823,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3213,19 +2836,14 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
-                                             t0, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return TryGetCode(CALLBACKS, name);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -3253,9 +2871,10 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3264,24 +2883,16 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          a1,
-                          a0,
-                          a2,
-                          a3,
-                          t0,
-                          name,
+  GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -3496,7 +3107,8 @@
 }
 
 
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // a0    : argc
   // a1    : constructor
   // ra    : return address
@@ -3519,8 +3131,7 @@
   // a1: constructor function
   // t7: undefined
   __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-  __ And(t0, a2, Operand(kSmiTagMask));
-  __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+  __ JumpIfSmi(a2, &generic_stub_call);
   __ GetObjectType(a2, a3, t0);
   __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
 
@@ -3541,12 +3152,7 @@
   // a2: initial map
   // t7: undefined
   __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-  __ AllocateInNewSpace(a3,
-                        t4,
-                        t5,
-                        t6,
-                        &generic_stub_call,
-                        SIZE_IN_WORDS);
+  __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
 
   // Allocated the JSObject, now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.
@@ -3581,7 +3187,7 @@
   // t7: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       Label not_passed, next;
@@ -4053,9 +3659,9 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
@@ -4483,9 +4089,9 @@
   __ Ret();
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
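
Throughout this file the stub compilers stop signalling "no specialized stub" by returning heap()->undefined_value() as a MaybeObject* and instead return Handle<Code>::null(), which callers test with is_null() before falling back to the regular compiler. A minimal standalone sketch of that convention with a stand-in Handle type (not V8's real handle machinery):

#include <cassert>
#include <cstddef>

// Stand-in for V8's Handle<T>: a (possibly null) indirect pointer.
template <typename T>
class Handle {
 public:
  Handle() : location_(NULL) {}
  explicit Handle(T** location) : location_(location) {}
  static Handle<T> null() { return Handle<T>(); }
  bool is_null() const { return location_ == NULL; }
 private:
  T** location_;
};

struct Code {};

// A custom-call compiler bails out by returning the null handle.
Handle<Code> CompileCustomCall(bool supported, Code** stub_slot) {
  if (!supported) return Handle<Code>::null();  // fall back to regular path
  return Handle<Code>(stub_slot);
}

int main() {
  Code code;
  Code* slot = &code;
  assert(CompileCustomCall(false, &slot).is_null());  // bail out
  assert(!CompileCustomCall(true, &slot).is_null());  // specialized stub
  return 0;
}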
 
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index dc3aa46..55a3b2f 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -563,10 +563,10 @@
 }
 
 
-bool Object::IsSerializedScopeInfo() {
+bool Object::IsScopeInfo() {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->serialized_scope_info_map();
+      HeapObject::cast(this)->GetHeap()->scope_info_map();
 }
 
 
@@ -1855,7 +1855,7 @@
 AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
   ASSERT(GetType(descriptor_number) == CALLBACKS);
   Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
-  return reinterpret_cast<AccessorDescriptor*>(p->address());
+  return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
 }
 
 
@@ -1975,7 +1975,7 @@
   while (true) {
     Object* element = KeyAt(entry);
     if (element == isolate->heap()->undefined_value()) break;  // Empty entry.
-    if (element != isolate->heap()->null_value() &&
+    if (element != isolate->heap()->the_hole_value() &&
         Shape::IsMatch(key, element)) return entry;
     entry = NextProbe(entry, count++, capacity);
   }
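
The FindEntry change is part of this patch's switch of the hash-table "deleted key" sentinel from null to the hole (see the objects.h hunk below): an undefined key still terminates the probe because the slot was never used, while a hole is skipped so entries inserted past a deletion stay reachable. A standalone sketch of that probing discipline, with illustrative names rather than V8 types:

#include <cassert>

enum State { EMPTY /* undefined */, DELETED /* the hole */, USED };

struct Slot { State state; int key; };

int FindEntry(const Slot* table, int capacity, int key) {
  int entry = key % capacity;  // start at the key's hash
  for (int count = 1; ; entry = (entry + count++) % capacity) {
    const Slot& slot = table[entry];
    if (slot.state == EMPTY) return -1;  // never used: probe ends
    if (slot.state == USED && slot.key == key) return entry;
    // DELETED (the hole) falls through: keep probing.
  }
}

int main() {
  Slot table[4] = {{USED, 4}, {DELETED, 0}, {USED, 9}, {EMPTY, 0}};
  assert(FindEntry(table, 4, 9) == 2);   // probes across the deleted slot
  assert(FindEntry(table, 4, 7) == -1);  // stops at the empty slot
  return 0;
}
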
@@ -2015,6 +2015,7 @@
 CAST_ACCESSOR(SymbolTable)
 CAST_ACCESSOR(JSFunctionResultCache)
 CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(ScopeInfo)
 CAST_ACCESSOR(CompilationCacheTable)
 CAST_ACCESSOR(CodeCacheHashTable)
 CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
@@ -3545,13 +3546,12 @@
 }
 
 
-SerializedScopeInfo* SharedFunctionInfo::scope_info() {
-  return reinterpret_cast<SerializedScopeInfo*>(
-      READ_FIELD(this, kScopeInfoOffset));
+ScopeInfo* SharedFunctionInfo::scope_info() {
+  return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
 }
 
 
-void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
+void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
                                         WriteBarrierMode mode) {
   WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
   CONDITIONAL_WRITE_BARRIER(GetHeap(),
@@ -3847,13 +3847,13 @@
 }
 
 
-Address Foreign::address() {
-  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kAddressOffset));
+Address Foreign::foreign_address() {
+  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
 }
 
 
-void Foreign::set_address(Address value) {
-  WRITE_INTPTR_FIELD(this, kAddressOffset, OffsetFrom(value));
+void Foreign::set_foreign_address(Address value) {
+  WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
 }
 
 
@@ -4434,7 +4434,6 @@
 
 template <int entrysize>
 uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) {
-  ASSERT(!key->IsUndefined() && !key->IsNull());
   MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
   return Smi::cast(maybe_hash->ToObjectChecked())->value();
 }
@@ -4443,7 +4442,6 @@
 template <int entrysize>
 uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
                                                         Object* other) {
-  ASSERT(!other->IsUndefined() && !other->IsNull());
   MaybeObject* maybe_hash = other->GetHash(OMIT_CREATION);
   return Smi::cast(maybe_hash->ToObjectChecked())->value();
 }
@@ -4455,11 +4453,6 @@
 }
 
 
-void ObjectHashTable::RemoveEntry(int entry) {
-  RemoveEntry(entry, GetHeap());
-}
-
-
 void Map::ClearCodeCache(Heap* heap) {
   // No write barrier is needed since empty_fixed_array is not in new space.
   // Please note this function is used during marking:
@@ -4536,14 +4529,14 @@
 
 void Foreign::ForeignIterateBody(ObjectVisitor* v) {
   v->VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+      reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
 }
 
 
 template<typename StaticVisitor>
 void Foreign::ForeignIterateBody() {
   StaticVisitor::VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+      reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
 }
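
The accessor rename here (address/set_address to foreign_address/set_foreign_address, with kAddressOffset becoming kForeignAddressOffset) removes an ambiguity: Foreign derives from HeapObject, which already exposes address() for the object's own heap location, so the old name shadowed it. A standalone sketch of the hazard, using hypothetical simplified types rather than V8's:

#include <cassert>
#include <cstdint>

struct HeapObject {
  // The object's own location in the heap.
  uintptr_t address() const { return reinterpret_cast<uintptr_t>(this); }
};

struct Foreign : public HeapObject {
  uintptr_t payload;  // the wrapped external pointer
  // Old: uintptr_t address() const;  // would shadow HeapObject::address()
  uintptr_t foreign_address() const { return payload; }
};

int main() {
  Foreign f;
  f.payload = 0x1234;
  assert(f.address() == reinterpret_cast<uintptr_t>(&f));  // heap location
  assert(f.foreign_address() == 0x1234);                   // external pointer
  return 0;
}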
 
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index b788504..1ca97de 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -759,7 +759,7 @@
 
 
 void Foreign::ForeignPrint(FILE* out) {
-  PrintF(out, "foreign address : %p", address());
+  PrintF(out, "foreign address : %p", foreign_address());
 }
 
 
diff --git a/src/objects.cc b/src/objects.cc
index 9a87ac5..3a18184 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -220,7 +220,7 @@
   if (structure->IsForeign()) {
     AccessorDescriptor* callback =
         reinterpret_cast<AccessorDescriptor*>(
-            Foreign::cast(structure)->address());
+            Foreign::cast(structure)->foreign_address());
     MaybeObject* value = (callback->getter)(receiver, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return value;
@@ -2006,7 +2006,7 @@
   if (structure->IsForeign()) {
     AccessorDescriptor* callback =
         reinterpret_cast<AccessorDescriptor*>(
-            Foreign::cast(structure)->address());
+            Foreign::cast(structure)->foreign_address());
     MaybeObject* obj = (callback->setter)(this,  value, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (obj->IsFailure()) return obj;
@@ -2148,47 +2148,35 @@
     bool* found,
     StrictModeFlag strict_mode) {
   Heap* heap = GetHeap();
-  LookupResult result(heap->isolate());
-  LookupCallbackSetterInPrototypes(name, &result);
-  if (result.IsFound()) {
+  // We could not find a local property so let's check whether there is an
+  // accessor that wants to handle the property.
+  LookupResult accessor_result(heap->isolate());
+  LookupCallbackSetterInPrototypes(name, &accessor_result);
+  if (accessor_result.IsFound()) {
     *found = true;
-    if (result.type() == CALLBACKS) {
-      return SetPropertyWithCallback(result.GetCallbackObject(),
+    if (accessor_result.type() == CALLBACKS) {
+      return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
                                      name,
                                      value,
-                                     result.holder(),
+                                     accessor_result.holder(),
                                      strict_mode);
-    } else if (result.type() == HANDLER) {
-      // We could not find a local property so let's check whether there is an
-      // accessor that wants to handle the property.
-      LookupResult accessor_result(heap->isolate());
-      LookupCallbackSetterInPrototypes(name, &accessor_result);
-      if (accessor_result.IsFound()) {
-        if (accessor_result.type() == CALLBACKS) {
-          return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
-                                         name,
-                                         value,
-                                         accessor_result.holder(),
-                                         strict_mode);
-        } else if (accessor_result.type() == HANDLER) {
-          // There is a proxy in the prototype chain. Invoke its
-          // getPropertyDescriptor trap.
-          bool found = false;
-          // SetPropertyWithHandlerIfDefiningSetter can cause GC,
-          // make sure to use the handlified references after calling
-          // the function.
-          Handle<JSObject> self(this);
-          Handle<String> hname(name);
-          Handle<Object> hvalue(value);
-          MaybeObject* result =
-              accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
-                  name, value, attributes, strict_mode, &found);
-          if (found) return result;
-          // The proxy does not define the property as an accessor.
-          // Consequently, it has no effect on setting the receiver.
-          return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
-        }
-      }
+    } else if (accessor_result.type() == HANDLER) {
+      // There is a proxy in the prototype chain. Invoke its
+      // getPropertyDescriptor trap.
+      bool found = false;
+      // SetPropertyWithHandlerIfDefiningSetter can cause GC,
+      // make sure to use the handlified references after calling
+      // the function.
+      Handle<JSObject> self(this);
+      Handle<String> hname(name);
+      Handle<Object> hvalue(value);
+      MaybeObject* result =
+          accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
+              name, value, attributes, strict_mode, &found);
+      if (found) return result;
+      // The proxy does not define the property as an accessor.
+      // Consequently, it has no effect on setting the receiver.
+      return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
     }
   }
   *found = false;
@@ -5190,8 +5178,8 @@
 void CodeCacheHashTable::RemoveByIndex(int index) {
   ASSERT(index >= 0);
   Heap* heap = GetHeap();
-  set(EntryToIndex(index), heap->null_value());
-  set(EntryToIndex(index) + 1, heap->null_value());
+  set(EntryToIndex(index), heap->the_hole_value());
+  set(EntryToIndex(index) + 1, heap->the_hole_value());
   ElementRemoved();
 }
 
@@ -8755,7 +8743,12 @@
 MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
                                                 uint32_t first_arg,
                                                 uint32_t arg_count) {
-  return EnsureCanContainElements(args->arguments() - first_arg, arg_count);
+  // Elements in |Arguments| are ordered backwards (because they're on the
+  // stack), but the method that's called here iterates over them in forward
+  // direction.
+  return EnsureCanContainElements(
+      args->arguments() - first_arg - (arg_count - 1),
+      arg_count);
 }
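
The fix above is pure pointer arithmetic: args->arguments() points at the first argument, the remaining arguments sit at decreasing addresses (they live on the stack), and the EnsureCanContainElements overload being called walks a forward range. Handing it args->arguments() - first_arg - (arg_count - 1), the lowest address in the range, makes the forward walk cover exactly the requested arguments; it visits them in reverse, which is harmless for a containment check. A standalone sketch of the arithmetic with plain ints standing in for Object pointers:

#include <cassert>

// A forward-walking callee, like the EnsureCanContainElements overload.
int CountEven(const int* first, int count) {
  int evens = 0;
  for (int i = 0; i < count; i++) evens += (first[i] % 2 == 0);
  return evens;
}

int main() {
  // Simulated downward stack: arg0 sits at the highest address.
  int memory[5] = {40, 30, 20, 10, 0};  // arg4 .. arg0
  const int* base = &memory[4];         // like args->arguments(): arg0
  int first_arg = 1, arg_count = 3;     // want arg1, arg2, arg3

  const int* lowest = base - first_arg - (arg_count - 1);
  assert(lowest == &memory[1]);
  assert(CountEven(lowest, arg_count) == 3);   // walks 30, 20, 10
  assert(lowest[0] == 30 && lowest[2] == 10);  // reverse argument order
  return 0;
}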
 
 
@@ -10524,8 +10517,10 @@
     FixedArray* pair = FixedArray::cast(other);
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
     if (shared != shared_) return false;
-    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
-        Smi::cast(pair->get(2))->value());
+    int strict_unchecked = Smi::cast(pair->get(2))->value();
+    ASSERT(strict_unchecked == kStrictMode ||
+           strict_unchecked == kNonStrictMode);
+    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
     if (strict_mode != strict_mode_) return false;
     String* source = String::cast(pair->get(1));
     return source->Equals(source_);
@@ -10557,8 +10552,10 @@
     FixedArray* pair = FixedArray::cast(obj);
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
     String* source = String::cast(pair->get(1));
-    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
-        Smi::cast(pair->get(2))->value());
+    int strict_unchecked = Smi::cast(pair->get(2))->value();
+    ASSERT(strict_unchecked == kStrictMode ||
+           strict_unchecked == kNonStrictMode);
+    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
     return StringSharedHashHelper(source, shared, strict_mode);
   }
 
@@ -10905,14 +10902,14 @@
     if (element->IsUndefined()) break;  // Empty entry.
     if (key == element) return entry;
     if (!element->IsSymbol() &&
-        !element->IsNull() &&
+        !element->IsTheHole() &&
         String::cast(element)->Equals(key)) {
       // Replace a non-symbol key by the equivalent symbol for faster further
       // lookups.
       set(index, key);
       return entry;
     }
-    ASSERT(element->IsNull() || !String::cast(element)->Equals(key));
+    ASSERT(element->IsTheHole() || !String::cast(element)->Equals(key));
     entry = NextProbe(entry, count++, capacity);
   }
   return kNotFound;
@@ -11016,7 +11013,7 @@
   // EnsureCapacity will guarantee the hash table is never full.
   while (true) {
     Object* element = KeyAt(entry);
-    if (element->IsUndefined() || element->IsNull()) break;
+    if (element->IsUndefined() || element->IsTheHole()) break;
     entry = NextProbe(entry, count++, capacity);
   }
   return entry;
@@ -11815,13 +11812,13 @@
 
 
 void CompilationCacheTable::Remove(Object* value) {
-  Object* null_value = GetHeap()->null_value();
+  Object* the_hole_value = GetHeap()->the_hole_value();
   for (int entry = 0, size = Capacity(); entry < size; entry++) {
     int entry_index = EntryToIndex(entry);
     int value_index = entry_index + 1;
     if (get(value_index) == value) {
-      NoWriteBarrierSet(this, entry_index, null_value);
-      NoWriteBarrierSet(this, value_index, null_value);
+      NoWriteBarrierSet(this, entry_index, the_hole_value);
+      NoWriteBarrierSet(this, value_index, the_hole_value);
       ElementRemoved();
     }
   }
@@ -11980,14 +11977,14 @@
 
   Heap* heap = GetHeap();
   int removed_entries = 0;
-  Object* sentinel = heap->null_value();
+  Object* the_hole_value = heap->the_hole_value();
   int capacity = Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* key = KeyAt(i);
     if (key->IsNumber()) {
       uint32_t number = static_cast<uint32_t>(key->Number());
       if (from <= number && number < to) {
-        SetEntry(i, sentinel, sentinel);
+        SetEntry(i, the_hole_value, the_hole_value);
         removed_entries++;
       }
     }
@@ -12007,7 +12004,7 @@
   if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
     return heap->false_value();
   }
-  SetEntry(entry, heap->null_value(), heap->null_value());
+  SetEntry(entry, heap->the_hole_value(), heap->the_hole_value());
   HashTable<Shape, Key>::ElementRemoved();
   return heap->true_value();
 }
@@ -12391,6 +12388,8 @@
 
 
 bool ObjectHashSet::Contains(Object* key) {
+  ASSERT(IsKey(key));
+
   // If the object does not have an identity hash, it was never used as a key.
   { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
     if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return false;
@@ -12400,6 +12399,8 @@
 
 
 MaybeObject* ObjectHashSet::Add(Object* key) {
+  ASSERT(IsKey(key));
+
   // Make sure the key object has an identity hash code.
   int hash;
   { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
@@ -12425,6 +12426,8 @@
 
 
 MaybeObject* ObjectHashSet::Remove(Object* key) {
+  ASSERT(IsKey(key));
+
   // If the object does not have an identity hash, it was never used as a key.
   { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
     if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return this;
@@ -12435,13 +12438,15 @@
   if (entry == kNotFound) return this;
 
   // Remove entry and try to shrink this hash set.
-  set_null(EntryToIndex(entry));
+  set_the_hole(EntryToIndex(entry));
   ElementRemoved();
   return Shrink(key);
 }
 
 
 Object* ObjectHashTable::Lookup(Object* key) {
+  ASSERT(IsKey(key));
+
   // If the object does not have an identity hash, it was never used as a key.
   { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
     if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
@@ -12455,6 +12460,8 @@
 
 
 MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
+  ASSERT(IsKey(key));
+
   // Make sure the key object has an identity hash code.
   int hash;
   { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
@@ -12494,9 +12501,9 @@
 }
 
 
-void ObjectHashTable::RemoveEntry(int entry, Heap* heap) {
-  set_null(heap, EntryToIndex(entry));
-  set_null(heap, EntryToIndex(entry) + 1);
+void ObjectHashTable::RemoveEntry(int entry) {
+  set_the_hole(EntryToIndex(entry));
+  set_the_hole(EntryToIndex(entry) + 1);
   ElementRemoved();
 }
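
The ASSERT(IsKey(key)) additions above sit alongside an existing fast path worth spelling out: GetHash(OMIT_CREATION) yields undefined for an object that was never assigned an identity hash, and such an object can never have been used as a key, so Contains/Lookup/Remove return immediately without probing; only Add pays for hash creation via ALLOW_CREATION. A standalone sketch of the idea with hypothetical stand-in types (0 plays the role of undefined):

#include <cassert>

enum CreationFlag { OMIT_CREATION, ALLOW_CREATION };

struct Key {
  int identity_hash;  // 0 means no identity hash assigned yet
};

int next_hash = 1;

int GetHash(Key* key, CreationFlag flag) {
  if (key->identity_hash == 0 && flag == ALLOW_CREATION) {
    key->identity_hash = next_hash++;  // lazily create the hash
  }
  return key->identity_hash;
}

bool Contains(Key* key) {
  // No identity hash: the key was never inserted, skip the probe.
  if (GetHash(key, OMIT_CREATION) == 0) return false;
  return true;  // placeholder for the actual table probe
}

int main() {
  Key k = {0};
  assert(!Contains(&k));        // early exit, no probing needed
  GetHash(&k, ALLOW_CREATION);  // what Add() does before inserting
  assert(Contains(&k));
  return 0;
}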
 
diff --git a/src/objects.h b/src/objects.h
index f7d2180..b423cd4 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -78,7 +78,7 @@
 //             - MapCache
 //           - Context
 //           - JSFunctionResultCache
-//           - SerializedScopeInfo
+//           - ScopeInfo
 //         - FixedDoubleArray
 //         - ExternalArray
 //           - ExternalPixelArray
@@ -814,7 +814,7 @@
   V(FixedDoubleArray)                          \
   V(Context)                                   \
   V(GlobalContext)                             \
-  V(SerializedScopeInfo)                       \
+  V(ScopeInfo)                                 \
   V(JSFunction)                                \
   V(Code)                                      \
   V(Oddball)                                   \
@@ -2509,7 +2509,7 @@
 // encountered and stops when unused elements are encountered.
 //
 // - Elements with key == undefined have not been used yet.
-// - Elements with key == null have been deleted.
+// - Elements with key == the_hole have been deleted.
 //
 // The hash table class is parameterized with a Shape and a Key.
 // Shape must be a class with the following interface:
@@ -2578,10 +2578,10 @@
   // Returns the key at entry.
   Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
 
-  // Tells whether k is a real key.  Null and undefined are not allowed
+  // Tells whether k is a real key.  The hole and undefined are not allowed
   // as keys and can be used to indicate missing or deleted elements.
   bool IsKey(Object* k) {
-    return !k->IsNull() && !k->IsUndefined();
+    return !k->IsTheHole() && !k->IsUndefined();
   }
 
   // Garbage collection support.
@@ -3050,8 +3050,7 @@
   friend class MarkCompactCollector;
 
   void AddEntry(int entry, Object* key, Object* value);
-  void RemoveEntry(int entry, Heap* heap);
-  inline void RemoveEntry(int entry);
+  void RemoveEntry(int entry);
 
   // Returns the index to the value of an entry.
   static inline int EntryToValueIndex(int entry) {
@@ -3098,14 +3097,16 @@
 };
 
 
+// ScopeInfo represents information about different scopes of a source
+// program and the allocation of the scope's variables. Scope information
+// is stored in a compressed form in ScopeInfo objects and is used
+// at runtime (stack dumps, deoptimization, etc.).
+
 // This object provides quick access to scope info details for runtime
-// routines w/o the need to explicitly create a ScopeInfo object.
-class SerializedScopeInfo : public FixedArray {
- public :
-  static SerializedScopeInfo* cast(Object* object) {
-    ASSERT(object->IsSerializedScopeInfo());
-    return reinterpret_cast<SerializedScopeInfo*>(object);
-  }
+// routines.
+class ScopeInfo : public FixedArray {
+ public:
+  static inline ScopeInfo* cast(Object* object);
 
   // Return the type of this scope.
   ScopeType Type();
@@ -3116,18 +3117,60 @@
   // Is this scope a strict mode scope?
   bool IsStrictMode();
 
-  // Return the number of stack slots for code.
-  int NumberOfStackSlots();
+  // Does this scope make a non-strict eval call?
+  bool CallsNonStrictEval() {
+    return CallsEval() && !IsStrictMode();
+  }
 
-  // Return the number of context slots for code.
-  int NumberOfContextSlots();
+  // Return the total number of locals allocated on the stack and in the
+  // context. This includes the parameters that are allocated in the context.
+  int LocalCount();
 
-  // Return if this has context slots besides MIN_CONTEXT_SLOTS;
+  // Return the number of stack slots for code. This number consists of two
+  // parts:
+  //  1. One stack slot per stack allocated local.
+  //  2. One stack slot for the function name if it is stack allocated.
+  int StackSlotCount();
+
+  // Return the number of context slots for code if a context is allocated. This
+  // number consists of three parts:
+  //  1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
+  //  2. One context slot per context allocated local.
+  //  3. One context slot for the function name if it is context allocated.
+  // Parameters allocated in the context count as context allocated locals. If
+  // no contexts are allocated for this scope, ContextLength returns 0.
+  int ContextLength();
+
+  // Is this scope the scope of a named function expression?
+  bool HasFunctionName();
+
+  // Return if this has context allocated locals.
   bool HasHeapAllocatedLocals();
 
   // Return if contexts are allocated for this scope.
   bool HasContext();
 
+  // Return the function_name if present.
+  String* FunctionName();
+
+  // Return the name of the given parameter.
+  String* ParameterName(int var);
+
+  // Return the name of the given local.
+  String* LocalName(int var);
+
+  // Return the name of the given stack local.
+  String* StackLocalName(int var);
+
+  // Return the name of the given context local.
+  String* ContextLocalName(int var);
+
+  // Return the mode of the given context local.
+  VariableMode ContextLocalMode(int var);
+
+  // Return the initialization flag of the given context local.
+  InitializationFlag ContextLocalInitFlag(int var);
+
   // Lookup support for serialized scope info. Returns the
   // stack slot index for a given slot name if the slot is
   // present; otherwise returns a value < 0. The name must be a symbol
@@ -3139,7 +3182,9 @@
   // returns a value < 0. The name must be a symbol (canonicalized).
   // If the slot is present and mode != NULL, sets *mode to the corresponding
   // mode for that variable.
-  int ContextSlotIndex(String* name, VariableMode* mode);
+  int ContextSlotIndex(String* name,
+                       VariableMode* mode,
+                       InitializationFlag* init_flag);
 
   // Lookup support for serialized scope info. Returns the
   // parameter index for a given parameter name if the parameter is present;
@@ -3152,17 +3197,104 @@
   // must be a symbol (canonicalized).
   int FunctionContextSlotIndex(String* name, VariableMode* mode);
 
-  static Handle<SerializedScopeInfo> Create(Scope* scope);
+  static Handle<ScopeInfo> Create(Scope* scope);
 
   // Serializes empty scope info.
-  static SerializedScopeInfo* Empty();
+  static ScopeInfo* Empty();
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+  // The layout of the static part of a ScopeInfo is as follows. Each entry is
+  // numeric and occupies one array slot.
+  // 1. A set of properties of the scope
+  // 2. The number of parameters. This only applies to function scopes. For
+  //    non-function scopes this is 0.
+  // 3. The number of non-parameter variables allocated on the stack.
+  // 4. The number of non-parameter and parameter variables allocated in the
+  //    context.
+#define FOR_EACH_NUMERIC_FIELD(V)          \
+  V(Flags)                                 \
+  V(ParameterCount)                        \
+  V(StackLocalCount)                       \
+  V(ContextLocalCount)
+
+#define FIELD_ACCESSORS(name)                            \
+  void Set##name(int value) {                            \
+    set(k##name, Smi::FromInt(value));                   \
+  }                                                      \
+  int name() {                                           \
+    if (length() > 0) {                                  \
+      return Smi::cast(get(k##name))->value();           \
+    } else {                                             \
+      return 0;                                          \
+    }                                                    \
+  }
+  FOR_EACH_NUMERIC_FIELD(FIELD_ACCESSORS)
+#undef FIELD_ACCESSORS
 
  private:
-  Object** ContextEntriesAddr();
+  enum {
+#define DECL_INDEX(name) k##name,
+  FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
+#undef DECL_INDEX
+#undef FOR_EACH_NUMERIC_FIELD
+  kVariablePartIndex
+  };
 
-  Object** ParameterEntriesAddr();
+  // The layout of the variable part of a ScopeInfo is as follows:
+  // 1. ParameterEntries:
+  //    This part stores the names of the parameters for function scopes. One
+  //    slot is used per parameter, so in total this part occupies
+  //    ParameterCount() slots in the array. For scopes other than function
+  //    scopes, ParameterCount() is 0.
+  // 2. StackLocalEntries:
+  //    Contains the names of local variables that are allocated on the stack,
+  //    in increasing order of the stack slot index. One slot is used per stack
+  //    local, so in total this part occupies StackLocalCount() slots in the
+  //    array.
+  // 3. ContextLocalNameEntries:
+  //    Contains the names of local variables and parameters that are allocated
+  //    in the context. They are stored in increasing order of the context slot
+  //    index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
+  //    context local, so in total this part occupies ContextLocalCount() slots
+  //    in the array.
+  // 4. ContextLocalInfoEntries:
+  //    Contains the variable modes and initialization flags corresponding to
+  //    the context locals in ContextLocalNameEntries. One slot is used per
+  //    context local, so in total this part occupies ContextLocalCount()
+  //    slots in the array.
+  // 5. FunctionNameEntryIndex:
+  //    If the scope belongs to a named function expression this part contains
+  //    information about the function variable. It always occupies two array
+  //    slots:  a. The name of the function variable.
+  //            b. The context or stack slot index for the variable.
+  int ParameterEntriesIndex();
+  int StackLocalEntriesIndex();
+  int ContextLocalNameEntriesIndex();
+  int ContextLocalInfoEntriesIndex();
+  int FunctionNameEntryIndex();
 
-  Object** StackSlotEntriesAddr();
+  // Location of the function variable for named function expressions.
+  enum FunctionVariableInfo {
+    NONE,     // No function name present.
+    STACK,    // Function variable is allocated on the stack.
+    CONTEXT,  // Function variable is allocated in a context slot.
+    UNUSED
+  };
+
+  // Properties of scopes.
+  class TypeField:             public BitField<ScopeType,            0, 3> {};
+  class CallsEvalField:        public BitField<bool,                 3, 1> {};
+  class StrictModeField:       public BitField<bool,                 4, 1> {};
+  class FunctionVariableField: public BitField<FunctionVariableInfo, 5, 2> {};
+  class FunctionVariableMode:  public BitField<VariableMode,         7, 3> {};
+
+  // BitFields representing the encoded information for context locals in the
+  // ContextLocalInfoEntries part.
+  class ContextLocalMode:      public BitField<VariableMode,         0, 3> {};
+  class ContextLocalInitFlag:  public BitField<InitializationFlag,   3, 1> {};
 };
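
The BitField declarations closing the ScopeInfo class pack several small values into one Smi-sized integer: each context local's slot in ContextLocalInfoEntries carries its VariableMode in bits 0-2 and its InitializationFlag in bit 3. A standalone sketch of the encode/decode mechanics with the same shift/size parameters (the enum values here are illustrative, not V8's):

#include <cassert>
#include <stdint.h>

template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

enum VariableMode { VAR, CONST, LET };  // illustrative subset
enum InitializationFlag { kNeedsInitialization, kCreatedInitialized };

typedef BitField<VariableMode, 0, 3> ContextLocalMode;
typedef BitField<InitializationFlag, 3, 1> ContextLocalInitFlag;

int main() {
  uint32_t info = ContextLocalMode::encode(LET) |
                  ContextLocalInitFlag::encode(kCreatedInitialized);
  assert(ContextLocalMode::decode(info) == LET);
  assert(ContextLocalInitFlag::decode(info) == kCreatedInitialized);
  return 0;
}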
 
 
@@ -4685,7 +4817,7 @@
   DECL_ACCESSORS(code, Code)
 
   // [scope_info]: Scope info.
-  DECL_ACCESSORS(scope_info, SerializedScopeInfo)
+  DECL_ACCESSORS(scope_info, ScopeInfo)
 
   // [construct stub]: Code stub for constructing instances of this function.
   DECL_ACCESSORS(construct_stub, Code)
@@ -7080,7 +7212,7 @@
 };
 
 
-// The JSSet describes EcmaScript Harmony maps
+// The JSSet describes EcmaScript Harmony sets
 class JSSet: public JSObject {
  public:
   // [set]: the backing hash set containing keys.
@@ -7174,8 +7306,8 @@
 class Foreign: public HeapObject {
  public:
   // [address]: field containing the address.
-  inline Address address();
-  inline void set_address(Address value);
+  inline Address foreign_address();
+  inline void set_foreign_address(Address value);
 
   // Casting.
   static inline Foreign* cast(Object* obj);
@@ -7198,10 +7330,10 @@
 
   // Layout description.
 
-  static const int kAddressOffset = HeapObject::kHeaderSize;
-  static const int kSize = kAddressOffset + kPointerSize;
+  static const int kForeignAddressOffset = HeapObject::kHeaderSize;
+  static const int kSize = kForeignAddressOffset + kPointerSize;
 
-  STATIC_CHECK(kAddressOffset == Internals::kForeignAddressOffset);
+  STATIC_CHECK(kForeignAddressOffset == Internals::kForeignAddressOffset);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
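
The private index helpers declared in the ScopeInfo hunk above (ParameterEntriesIndex and friends) follow directly from the documented layout: each part begins where the previous one ends, and the context locals contribute two equally sized parts (names, then packed mode/init-flag info). A standalone sketch of that arithmetic over a hypothetical header struct:

#include <cassert>

struct ScopeInfoHeader {
  int parameter_count;
  int stack_local_count;
  int context_local_count;
};

// Flags plus the three counts occupy slots 0..3 (kVariablePartIndex == 4).
int ParameterEntriesIndex(const ScopeInfoHeader&) { return 4; }

int StackLocalEntriesIndex(const ScopeInfoHeader& h) {
  return ParameterEntriesIndex(h) + h.parameter_count;
}
int ContextLocalNameEntriesIndex(const ScopeInfoHeader& h) {
  return StackLocalEntriesIndex(h) + h.stack_local_count;
}
int ContextLocalInfoEntriesIndex(const ScopeInfoHeader& h) {
  return ContextLocalNameEntriesIndex(h) + h.context_local_count;
}
int FunctionNameEntryIndex(const ScopeInfoHeader& h) {
  return ContextLocalInfoEntriesIndex(h) + h.context_local_count;
}

int main() {
  ScopeInfoHeader h = {2, 1, 3};  // 2 params, 1 stack local, 3 context locals
  assert(StackLocalEntriesIndex(h) == 6);
  assert(ContextLocalNameEntriesIndex(h) == 7);
  assert(ContextLocalInfoEntriesIndex(h) == 10);
  assert(FunctionNameEntryIndex(h) == 13);
  return 0;
}
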
diff --git a/src/parser.cc b/src/parser.cc
index 37204c9..c2177bb 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1385,7 +1385,9 @@
     var = declaration_scope->LocalLookup(name);
     if (var == NULL) {
       // Declare the name.
-      var = declaration_scope->DeclareLocal(name, mode);
+      InitializationFlag init_flag = (fun != NULL || mode == VAR)
+          ? kCreatedInitialized : kNeedsInitialization;
+      var = declaration_scope->DeclareLocal(name, mode, init_flag);
     } else {
       // The name was declared in this scope before; check for conflicting
       // re-declarations. We have a conflict if either of the declarations is
@@ -1452,7 +1454,12 @@
       declaration_scope->is_global_scope()) {
     ASSERT(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
-    var = new(zone()) Variable(declaration_scope, name, CONST, true, kind);
+    var = new(zone()) Variable(declaration_scope,
+                               name,
+                               CONST,
+                               true,
+                               kind,
+                               kNeedsInitialization);
   }
 
   // If requested and we have a local variable, bind the proxy to the variable
@@ -1522,7 +1529,7 @@
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
   Handle<SharedFunctionInfo> shared =
       isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
-          Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
+          Handle<ScopeInfo>(fun->shared()->scope_info()));
   shared->set_construct_stub(*construct_stub);
 
   // Copy the function data to the shared function info.
@@ -1766,7 +1773,7 @@
     // For let/const declarations in harmony mode, we can also immediately
     // pre-resolve the proxy because it resides in the same scope as the
     // declaration.
-    Declare(name, mode, NULL, mode != VAR, CHECK_OK);
+    VariableProxy* proxy = Declare(name, mode, NULL, mode != VAR, CHECK_OK);
     nvars++;
     if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
       ReportMessageAt(scanner().location(), "too_many_variables",
@@ -1821,6 +1828,11 @@
       if (decl_props != NULL) *decl_props = kHasInitializers;
     }
 
+    // Record the end position of the initializer.
+    if (proxy->var() != NULL) {
+      proxy->var()->set_initializer_position(scanner().location().end_pos);
+    }
+
     // Make sure that 'const x' and 'let x' initialize 'x' to undefined.
     if (value == NULL && needs_init) {
       value = GetLiteralUndefined();
@@ -2283,7 +2295,8 @@
     if (peek() == Token::LBRACE) {
       Target target(&this->target_stack_, &catch_collector);
       VariableMode mode = harmony_scoping_ ? LET : VAR;
-      catch_variable = catch_scope->DeclareLocal(name, mode);
+      catch_variable =
+          catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
 
       SaveScope save_scope(this, catch_scope);
       catch_block = ParseBlock(NULL, CHECK_OK);
@@ -2460,7 +2473,7 @@
         // implementing stack allocated block scoped variables.
         Variable* temp = top_scope_->DeclarationScope()->NewTemporary(name);
         VariableProxy* temp_proxy = new(zone()) VariableProxy(isolate(), temp);
-        VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
+        VariableProxy* each = top_scope_->NewUnresolved(name);
         ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
         Target target(&this->target_stack_, loop);
 
@@ -2932,23 +2945,14 @@
         // Keep track of eval() calls since they disable all local variable
         // optimizations.
         // The calls that need special treatment are the
-        // direct (i.e. not aliased) eval calls. These calls are all of the
-        // form eval(...) with no explicit receiver object where eval is not
-        // declared in the current scope chain.
+        // direct eval calls. These calls are all of the form eval(...), with
+        // no explicit receiver.
         // These calls are marked as potentially direct eval calls. Whether
         // they are actually direct calls to eval is determined at run time.
-        // TODO(994): In ES5, it doesn't matter if the "eval" var is declared
-        // in the local scope chain. It only matters that it's called "eval",
-        // is called without a receiver and it refers to the original eval
-        // function.
         VariableProxy* callee = result->AsVariableProxy();
         if (callee != NULL &&
             callee->IsVariable(isolate()->factory()->eval_symbol())) {
-          Handle<String> name = callee->name();
-          Variable* var = top_scope_->Lookup(name);
-          if (var == NULL) {
-            top_scope_->DeclarationScope()->RecordEvalCall();
-          }
+          top_scope_->DeclarationScope()->RecordEvalCall();
         }
         result = NewCall(result, args, pos);
         break;
@@ -5311,7 +5315,7 @@
                                   int flags,
                                   ParserRecorder* recorder) {
   Isolate* isolate = Isolate::Current();
-  JavaScriptScanner scanner(isolate->unicode_cache());
+  Scanner scanner(isolate->unicode_cache());
   scanner.SetHarmonyScoping((flags & kHarmonyScoping) != 0);
   scanner.Initialize(source);
   intptr_t stack_limit = isolate->stack_guard()->real_climit();
diff --git a/src/parser.h b/src/parser.h
index eaae6f7..9624301 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -77,6 +77,8 @@
   int literal_count() { return backing_[kLiteralCountOffset]; }
   int property_count() { return backing_[kPropertyCountOffset]; }
   StrictModeFlag strict_mode_flag() {
+    ASSERT(backing_[kStrictModeOffset] == kStrictMode ||
+           backing_[kStrictModeOffset] == kNonStrictMode);
     return static_cast<StrictModeFlag>(backing_[kStrictModeOffset]);
   }
 
@@ -484,7 +486,7 @@
   void ReportMessage(const char* message, Vector<const char*> args);
 
   bool inside_with() const { return top_scope_->inside_with(); }
-  JavaScriptScanner& scanner()  { return scanner_; }
+  Scanner& scanner()  { return scanner_; }
   Mode mode() const { return mode_; }
   ScriptDataImpl* pre_data() const { return pre_data_; }
 
@@ -724,7 +726,7 @@
   ZoneList<Handle<String> > symbol_cache_;
 
   Handle<Script> script_;
-  JavaScriptScanner scanner_;
+  Scanner scanner_;
 
   Scope* top_scope_;
 
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 685ec3c..20bd837 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -333,44 +333,126 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
-    return false;
-  }
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
 
-  UpdateAllocatedSpaceLimits(address, size);
-  return true;
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
 
 
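The FreeBSD VirtualMemory code above is the usual POSIX over-reserve-and-trim pattern: mmap size + alignment bytes of PROT_NONE address space, then munmap the misaligned prefix and the excess suffix so only the aligned block stays reserved. A minimal stand-alone sketch of the same technique (assuming alignment is a power of two and a multiple of the page size; error handling is simplified relative to the code above):

    #include <sys/mman.h>
    #include <stdint.h>
    #include <stddef.h>

    // Reserve |size| bytes of PROT_NONE address space aligned to
    // |alignment|. Returns NULL on failure.
    static void* ReserveAligned(size_t size, size_t alignment) {
      size_t request = size + alignment;  // over-reserve to guarantee a fit
      void* raw = mmap(NULL, request, PROT_NONE,
                       MAP_PRIVATE | MAP_ANON, -1, 0);
      if (raw == MAP_FAILED) return NULL;
      char* base = static_cast<char*>(raw);
      uintptr_t aligned =
          (reinterpret_cast<uintptr_t>(base) + alignment - 1) &
          ~static_cast<uintptr_t>(alignment - 1);
      char* aligned_base = reinterpret_cast<char*>(aligned);
      // Unmap the misaligned prefix, if any.
      if (aligned_base != base) {
        munmap(base, static_cast<size_t>(aligned_base - base));
      }
      // Unmap whatever is left past the aligned block.
      size_t suffix =
          request - static_cast<size_t>(aligned_base - base) - size;
      if (suffix != 0) munmap(aligned_base + size, suffix);
      return aligned_base;
    }

Committing later flips the protection of a sub-range to PROT_READ|PROT_WRITE with MAP_FIXED, exactly as CommitRegion does above.
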
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index 25c7a82..1bca9a3 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -182,7 +182,7 @@
   internal::InputStreamUTF16Buffer buffer(input);
   uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
   internal::UnicodeCache unicode_cache;
-  internal::JavaScriptScanner scanner(&unicode_cache);
+  internal::Scanner scanner(&unicode_cache);
   scanner.Initialize(&buffer);
   internal::CompleteParserRecorder recorder;
   preparser::PreParser::PreParseResult result =
diff --git a/src/preparser.cc b/src/preparser.cc
index b1628eb..291dfd3 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -72,7 +72,7 @@
   if (token == i::Token::ILLEGAL && stack_overflow_) {
     return;
   }
-  i::JavaScriptScanner::Location source_location = scanner_->location();
+  i::Scanner::Location source_location = scanner_->location();
 
   // Four of the tokens are treated specially
   switch (token) {
@@ -647,7 +647,7 @@
 
   Expect(i::Token::THROW, CHECK_OK);
   if (scanner_->HasAnyLineTerminatorBeforeNext()) {
-    i::JavaScriptScanner::Location pos = scanner_->location();
+    i::Scanner::Location pos = scanner_->location();
     ReportMessageAt(pos, "newline_after_throw", NULL);
     *ok = false;
     return Statement::Default();
diff --git a/src/preparser.h b/src/preparser.h
index 45e81e9..647a142 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -116,7 +116,7 @@
   // success (even if parsing failed, the pre-parse data successfully
   // captured the syntax error), and false if a stack-overflow happened
   // during parsing.
-  static PreParseResult PreParseProgram(i::JavaScriptScanner* scanner,
+  static PreParseResult PreParseProgram(i::Scanner* scanner,
                                         i::ParserRecorder* log,
                                         int flags,
                                         uintptr_t stack_limit) {
@@ -449,7 +449,7 @@
   };
 
   // Private constructor only used in PreParseProgram.
-  PreParser(i::JavaScriptScanner* scanner,
+  PreParser(i::Scanner* scanner,
             i::ParserRecorder* log,
             uintptr_t stack_limit,
             bool allow_lazy,
@@ -619,7 +619,7 @@
                                      Identifier identifier,
                                      bool* ok);
 
-  i::JavaScriptScanner* scanner_;
+  i::Scanner* scanner_;
   i::ParserRecorder* log_;
   Scope* scope_;
   uintptr_t stack_limit_;
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 9812c26..7760d8f 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -2061,20 +2061,27 @@
 void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
                                               HeapEntry* entry) {
   if (js_obj->IsJSFunction()) {
-    HandleScope hs;
     JSFunction* func = JSFunction::cast(js_obj);
     Context* context = func->context();
-    ZoneScope zscope(Isolate::Current(), DELETE_ON_EXIT);
-    SerializedScopeInfo* serialized_scope_info =
-        context->closure()->shared()->scope_info();
-    ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
-    int locals_number = zone_scope_info.NumberOfLocals();
-    for (int i = 0; i < locals_number; ++i) {
-      String* local_name = *zone_scope_info.LocalName(i);
-      int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
-      if (idx >= 0 && idx < context->length()) {
-        SetClosureReference(js_obj, entry, local_name, context->get(idx));
-      }
+    ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+
+    // Add context allocated locals.
+    int context_locals = scope_info->ContextLocalCount();
+    for (int i = 0; i < context_locals; ++i) {
+      String* local_name = scope_info->ContextLocalName(i);
+      int idx = Context::MIN_CONTEXT_SLOTS + i;
+      SetClosureReference(js_obj, entry, local_name, context->get(idx));
+    }
+
+    // Add function variable.
+    if (scope_info->HasFunctionName()) {
+      String* name = scope_info->FunctionName();
+      int idx = Context::MIN_CONTEXT_SLOTS + context_locals;
+#ifdef DEBUG
+      VariableMode mode;
+      ASSERT(idx == scope_info->FunctionContextSlotIndex(name, &mode));
+#endif
+      SetClosureReference(js_obj, entry, name, context->get(idx));
     }
   }
 }
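
With ScopeInfo replacing the zone-allocated copy, the profiler can index context-allocated locals directly: the i-th local lives at slot Context::MIN_CONTEXT_SLOTS + i, and the function-name variable, when present, sits in the slot right after the locals (the DEBUG-only ASSERT above cross-checks this against FunctionContextSlotIndex). A hypothetical model of that layout (the header size of 4 is an assumption, not something this patch states):

    // Hypothetical model of a function context's slot layout: a fixed
    // header, then context-allocated locals, then the function name.
    static const int kMinContextSlots = 4;  // assumed header size

    int SlotForContextLocal(int i) {
      return kMinContextSlots + i;  // matches idx in the hunk above
    }

    int SlotForFunctionName(int context_local_count) {
      return kMinContextSlots + context_local_count;
    }
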
diff --git a/src/runtime.cc b/src/runtime.cc
index 9c23c2c..29807da 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -106,6 +106,16 @@
   type name = NumberTo##Type(obj);
 
 
+// Assert that the given argument has a valid value for a StrictModeFlag
+// and store it in a StrictModeFlag variable with the given name.
+#define CONVERT_STRICT_MODE_ARG(name, index)                         \
+  ASSERT(args[index]->IsSmi());                                      \
+  ASSERT(args.smi_at(index) == kStrictMode ||                        \
+         args.smi_at(index) == kNonStrictMode);                      \
+  StrictModeFlag name =                                              \
+      static_cast<StrictModeFlag>(args.smi_at(index));
+
+
 MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
                                                    JSObject* boilerplate) {
   StackLimitCheck check(isolate);
@@ -1515,8 +1525,7 @@
   CONVERT_ARG_CHECKED(String, name, 0);
   GlobalObject* global = isolate->context()->global();
   RUNTIME_ASSERT(args[1]->IsSmi());
-  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(1));
-  ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
+  CONVERT_STRICT_MODE_ARG(strict_mode, 1);
 
   // According to ECMA-262, section 12.2, page 62, the property must
   // not be deletable.
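
CONVERT_STRICT_MODE_ARG, defined in the previous hunk, folds the removed check-and-cast pair into one line. Expanded by hand for index 1, the new call site is roughly:

    // Hand expansion of CONVERT_STRICT_MODE_ARG(strict_mode, 1):
    ASSERT(args[1]->IsSmi());
    ASSERT(args.smi_at(1) == kStrictMode ||
           args.smi_at(1) == kNonStrictMode);
    StrictModeFlag strict_mode =
        static_cast<StrictModeFlag>(args.smi_at(1));

Note that the macro uses ASSERT, not RUNTIME_ASSERT, so in release builds only the cast survives; call sites that must reject bad input in release keep their own RUNTIME_ASSERT, as this one does.
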
@@ -4171,6 +4180,23 @@
 }
 
 
+MaybeObject* TransitionElements(Handle<Object> object,
+                                ElementsKind to_kind,
+                                Isolate* isolate) {
+  HandleScope scope(isolate);
+  if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
+  ElementsKind from_kind =
+      Handle<JSObject>::cast(object)->map()->elements_kind();
+  if (Map::IsValidElementsTransition(from_kind, to_kind)) {
+    Handle<Object> result =
+        TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
+    if (result.is_null()) return isolate->ThrowIllegalOperation();
+    return *result;
+  }
+  return isolate->ThrowIllegalOperation();
+}
+
+
 // KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
   NoHandleAllocation ha;
@@ -4187,40 +4213,63 @@
   //
   // Additionally, we need to make sure that we do not cache results
   // for objects that require access checks.
-  if (args[0]->IsJSObject() &&
-      !args[0]->IsJSGlobalProxy() &&
-      !args[0]->IsAccessCheckNeeded() &&
-      args[1]->IsString()) {
-    JSObject* receiver = JSObject::cast(args[0]);
-    String* key = String::cast(args[1]);
-    if (receiver->HasFastProperties()) {
-      // Attempt to use lookup cache.
-      Map* receiver_map = receiver->map();
-      KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
-      int offset = keyed_lookup_cache->Lookup(receiver_map, key);
-      if (offset != -1) {
-        Object* value = receiver->FastPropertyAt(offset);
-        return value->IsTheHole() ? isolate->heap()->undefined_value() : value;
+  if (args[0]->IsJSObject()) {
+    if (!args[0]->IsJSGlobalProxy() &&
+        !args[0]->IsAccessCheckNeeded() &&
+        args[1]->IsString()) {
+      JSObject* receiver = JSObject::cast(args[0]);
+      String* key = String::cast(args[1]);
+      if (receiver->HasFastProperties()) {
+        // Attempt to use lookup cache.
+        Map* receiver_map = receiver->map();
+        KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
+        int offset = keyed_lookup_cache->Lookup(receiver_map, key);
+        if (offset != -1) {
+          Object* value = receiver->FastPropertyAt(offset);
+          return value->IsTheHole()
+              ? isolate->heap()->undefined_value()
+              : value;
+        }
+        // Lookup cache miss.  Perform lookup and update the cache if
+        // appropriate.
+        LookupResult result(isolate);
+        receiver->LocalLookup(key, &result);
+        if (result.IsProperty() && result.type() == FIELD) {
+          int offset = result.GetFieldIndex();
+          keyed_lookup_cache->Update(receiver_map, key, offset);
+          return receiver->FastPropertyAt(offset);
+        }
+      } else {
+        // Attempt dictionary lookup.
+        StringDictionary* dictionary = receiver->property_dictionary();
+        int entry = dictionary->FindEntry(key);
+        if ((entry != StringDictionary::kNotFound) &&
+            (dictionary->DetailsAt(entry).type() == NORMAL)) {
+          Object* value = dictionary->ValueAt(entry);
+          if (!receiver->IsGlobalObject()) return value;
+          value = JSGlobalPropertyCell::cast(value)->value();
+          if (!value->IsTheHole()) return value;
+          // If value is the hole do the general lookup.
+        }
       }
-      // Lookup cache miss.  Perform lookup and update the cache if appropriate.
-      LookupResult result(isolate);
-      receiver->LocalLookup(key, &result);
-      if (result.IsProperty() && result.type() == FIELD) {
-        int offset = result.GetFieldIndex();
-        keyed_lookup_cache->Update(receiver_map, key, offset);
-        return receiver->FastPropertyAt(offset);
-      }
-    } else {
-      // Attempt dictionary lookup.
-      StringDictionary* dictionary = receiver->property_dictionary();
-      int entry = dictionary->FindEntry(key);
-      if ((entry != StringDictionary::kNotFound) &&
-          (dictionary->DetailsAt(entry).type() == NORMAL)) {
-        Object* value = dictionary->ValueAt(entry);
-        if (!receiver->IsGlobalObject()) return value;
-        value = JSGlobalPropertyCell::cast(value)->value();
-        if (!value->IsTheHole()) return value;
-        // If value is the hole do the general lookup.
+    } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) {
+      // JSObject without a string key. If the key is a Smi, check for a
+      // definite out-of-bounds access to elements, which is a strong indicator
+      // that subsequent accesses will also call the runtime. Proactively
+      // transition elements to FAST_ELEMENTS to avoid excessive boxing of
+      // doubles for those future calls in the case that the elements would
+      // become FAST_DOUBLE_ELEMENTS.
+      Handle<JSObject> js_object(args.at<JSObject>(0));
+      ElementsKind elements_kind = js_object->GetElementsKind();
+      if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+          elements_kind == FAST_DOUBLE_ELEMENTS) {
+        FixedArrayBase* elements = js_object->elements();
+        if (args.at<Smi>(1)->value() >= elements->length()) {
+          MaybeObject* maybe_object = TransitionElements(js_object,
+                                                         FAST_ELEMENTS,
+                                                         isolate);
+          if (maybe_object->IsFailure()) return maybe_object;
+        }
       }
     }
   } else if (args[0]->IsString() && args[1]->IsSmi()) {
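
Two ideas carry this rewrite: TransitionElements (hoisted earlier in the file; its old definition is deleted in a later hunk) only allows generalizing elements-kind changes, and the new Smi-key branch treats a definitely-out-of-bounds load as a signal to generalize early rather than box doubles on every future miss. A stand-alone miniature of both (the kind names and their ordering come from this patch; the helpers themselves are hypothetical):

    // FAST_SMI_ONLY -> FAST_DOUBLE -> FAST; transitions never go back.
    enum Kind { FAST_SMI_ONLY, FAST_DOUBLE, FAST };

    bool IsValidTransition(Kind from, Kind to) { return from < to; }

    // A definite out-of-bounds read suggests more runtime misses will
    // follow, so pessimize to FAST once up front.
    Kind OnKeyedLoad(Kind kind, int key, int length) {
      if (key >= length && kind != FAST) return FAST;
      return kind;
    }
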
@@ -4594,10 +4643,8 @@
 
   StrictModeFlag strict_mode = kNonStrictMode;
   if (args.length() == 5) {
-    CONVERT_SMI_ARG_CHECKED(strict_unchecked, 4);
-    RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
-                   strict_unchecked == kNonStrictMode);
-    strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+    CONVERT_STRICT_MODE_ARG(strict_mode_flag, 4);
+    strict_mode = strict_mode_flag;
   }
 
   return Runtime::SetObjectProperty(isolate,
@@ -4609,23 +4656,6 @@
 }
 
 
-MaybeObject* TransitionElements(Handle<Object> object,
-                                ElementsKind to_kind,
-                                Isolate* isolate) {
-  HandleScope scope(isolate);
-  if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
-  ElementsKind from_kind =
-      Handle<JSObject>::cast(object)->map()->elements_kind();
-  if (Map::IsValidElementsTransition(from_kind, to_kind)) {
-    Handle<Object> result =
-        TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
-    if (result.is_null()) return isolate->ThrowIllegalOperation();
-    return *result;
-  }
-  return isolate->ThrowIllegalOperation();
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 1);
@@ -4659,6 +4689,44 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
+  RUNTIME_ASSERT(args.length() == 5);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_SMI_ARG_CHECKED(store_index, 1);
+  Handle<Object> value = args.at<Object>(2);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 3);
+  CONVERT_SMI_ARG_CHECKED(literal_index, 4);
+  HandleScope scope;
+
+  Object* raw_boilerplate_object = literals->get(literal_index);
+  Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
+#ifdef DEBUG
+  ElementsKind elements_kind = object->GetElementsKind();
+#endif
+  ASSERT(elements_kind <= FAST_DOUBLE_ELEMENTS);
+  // Smis should never trigger transitions.
+  ASSERT(!value->IsSmi());
+
+  if (value->IsNumber()) {
+    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+    TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+    ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+    FixedDoubleArray* double_array =
+        FixedDoubleArray::cast(object->elements());
+    HeapNumber* number = HeapNumber::cast(*value);
+    double_array->set(store_index, number->Number());
+  } else {
+    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           elements_kind == FAST_DOUBLE_ELEMENTS);
+    TransitionElementsKind(object, FAST_ELEMENTS);
+    FixedArray* object_array =
+        FixedArray::cast(object->elements());
+    object_array->set(store_index, *value);
+  }
+  return *object;
+}
+
+
 // Set a local property, even if it is READ_ONLY.  If the property does not
 // exist, it will be added with attributes NONE.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
@@ -7490,7 +7558,7 @@
 }
 
 
-static int MakeDay(int year, int month, int day) {
+static int MakeDay(int year, int month) {
   static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
                                        181, 212, 243, 273, 304, 334};
   static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
@@ -7527,23 +7595,22 @@
                       year1 / 400 -
                       base_day;
 
-  if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
-    return day_from_year + day_from_month[month] + day - 1;
+  if ((year % 4 != 0) || (year % 100 == 0 && year % 400 != 0)) {
+    return day_from_year + day_from_month[month];
   }
 
-  return day_from_year + day_from_month_leap[month] + day - 1;
+  return day_from_year + day_from_month_leap[month];
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
   NoHandleAllocation ha;
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 2);
 
   CONVERT_SMI_ARG_CHECKED(year, 0);
   CONVERT_SMI_ARG_CHECKED(month, 1);
-  CONVERT_SMI_ARG_CHECKED(date, 2);
 
-  return Smi::FromInt(MakeDay(year, month, date));
+  return Smi::FromInt(MakeDay(year, month));
 }
 
 
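Dropping the day argument makes MakeDay return the days-from-epoch of the first of the month; callers now add day - 1 themselves, which is exactly how the ASSERTs in the following hunks are rewritten. A self-contained spot check of the invariant, using the same tables and leap-year rule as the code above:

    #include <assert.h>

    static bool IsLeap(int year) {
      return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
    }

    // Day-of-year of the first of |month| (0-based), mirroring the
    // day_from_month tables in MakeDay above.
    static int DayOfYearOfFirst(int year, int month) {
      static const int kDays[] =     {0, 31, 59, 90, 120, 151,
                                      181, 212, 243, 273, 304, 334};
      static const int kDaysLeap[] = {0, 31, 60, 91, 121, 152,
                                      182, 213, 244, 274, 305, 335};
      return IsLeap(year) ? kDaysLeap[month] : kDays[month];
    }

    int main() {
      // Old MakeDay(year, month, day) == new MakeDay(year, month) + day - 1.
      // March 15 is day-of-year 73 in a non-leap year, 74 in a leap year.
      assert(DayOfYearOfFirst(2011, 2) + 15 - 1 == 73);
      assert(DayOfYearOfFirst(2012, 2) + 15 - 1 == 74);
      return 0;
    }
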
@@ -7772,7 +7839,7 @@
   month = kMonthInYear[date];
   day = kDayInYear[date];
 
-  ASSERT(MakeDay(year, month, day) == save_date);
+  ASSERT(MakeDay(year, month) + day - 1 == save_date);
 }
 
 
@@ -7786,7 +7853,7 @@
   year = 400 * (date / kDaysIn400Years) - kYearsOffset;
   date %= kDaysIn400Years;
 
-  ASSERT(MakeDay(year, 0, 1) + date == save_date);
+  ASSERT(MakeDay(year, 0) + date == save_date);
 
   date--;
   int yd1 = date / kDaysIn100Years;
@@ -7809,8 +7876,8 @@
   ASSERT(is_leap || (date >= 0));
   ASSERT((date < 365) || (is_leap && (date < 366)));
   ASSERT(is_leap == ((year % 4 == 0) && (year % 100 || (year % 400 == 0))));
-  ASSERT(is_leap || ((MakeDay(year, 0, 1) + date) == save_date));
-  ASSERT(!is_leap || ((MakeDay(year, 0, 1) + date + 1) == save_date));
+  ASSERT(is_leap || ((MakeDay(year, 0) + date) == save_date));
+  ASSERT(!is_leap || ((MakeDay(year, 0) + date + 1) == save_date));
 
   if (is_leap) {
     day = kDayInYear[2*365 + 1 + date];
@@ -7820,7 +7887,7 @@
     month = kMonthInYear[date];
   }
 
-  ASSERT(MakeDay(year, month, day) == save_date);
+  ASSERT(MakeDay(year, month) + day - 1 == save_date);
 }
 
 
@@ -7846,19 +7913,14 @@
 
   FixedArrayBase* elms_base = FixedArrayBase::cast(res_array->elements());
   RUNTIME_ASSERT(elms_base->length() == 3);
-  RUNTIME_ASSERT(res_array->GetElementsKind() <= FAST_DOUBLE_ELEMENTS);
+  RUNTIME_ASSERT(res_array->HasFastTypeElements());
 
-  if (res_array->HasFastDoubleElements()) {
-    FixedDoubleArray* elms = FixedDoubleArray::cast(res_array->elements());
-    elms->set(0, year);
-    elms->set(1, month);
-    elms->set(2, day);
-  } else {
-    FixedArray* elms = FixedArray::cast(res_array->elements());
-    elms->set(0, Smi::FromInt(year));
-    elms->set(1, Smi::FromInt(month));
-    elms->set(2, Smi::FromInt(day));
-  }
+  MaybeObject* maybe = res_array->EnsureWritableFastElements();
+  if (maybe->IsFailure()) return maybe;
+  FixedArray* elms = FixedArray::cast(res_array->elements());
+  elms->set(0, Smi::FromInt(year));
+  elms->set(1, Smi::FromInt(month));
+  elms->set(2, Smi::FromInt(day));
 
   return isolate->heap()->undefined_value();
 }
@@ -7909,14 +7971,14 @@
         --index;
       }
 
-      ScopeInfo<> scope_info(callee->shared()->scope_info());
+      Handle<ScopeInfo> scope_info(callee->shared()->scope_info());
       while (index >= 0) {
         // Detect duplicate names to the right in the parameter list.
-        Handle<String> name = scope_info.parameter_name(index);
-        int context_slot_count = scope_info.number_of_context_slots();
+        Handle<String> name(scope_info->ParameterName(index));
+        int context_local_count = scope_info->ContextLocalCount();
         bool duplicate = false;
         for (int j = index + 1; j < parameter_count; ++j) {
-          if (scope_info.parameter_name(j).is_identical_to(name)) {
+          if (scope_info->ParameterName(j) == *name) {
             duplicate = true;
             break;
           }
@@ -7931,17 +7993,16 @@
           // The context index goes in the parameter map with a hole in the
           // arguments array.
           int context_index = -1;
-          for (int j = Context::MIN_CONTEXT_SLOTS;
-               j < context_slot_count;
-               ++j) {
-            if (scope_info.context_slot_name(j).is_identical_to(name)) {
+          for (int j = 0; j < context_local_count; ++j) {
+            if (scope_info->ContextLocalName(j) == *name) {
               context_index = j;
               break;
             }
           }
           ASSERT(context_index >= 0);
           arguments->set_the_hole(index);
-          parameter_map->set(index + 2, Smi::FromInt(context_index));
+          parameter_map->set(index + 2, Smi::FromInt(
+              Context::MIN_CONTEXT_SLOTS + context_index));
         }
 
         --index;
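
The substantive fix in this hunk is the last changed line: ContextLocalCount and ContextLocalName number locals from 0, so the slot stored in the parameter map must be rebased by Context::MIN_CONTEXT_SLOTS; the old loop iterated absolute slot numbers and could store the index as-is. In miniature (hypothetical helper; the + 2 in the real code skips the map's two header entries):

    static const int kMinContextSlots = 4;  // assumed header size

    // Value stored for an aliased parameter: the absolute context slot
    // that the 0-based context local index refers to.
    int ParameterMapValue(int context_local_index) {
      return kMinContextSlots + context_local_index;
    }
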
@@ -8185,13 +8246,9 @@
     prototype = Handle<Object>(function->instance_prototype(), isolate);
   }
   if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
-    HandleScope scope(isolate);
     ConstructStubCompiler compiler(isolate);
-    MaybeObject* code = compiler.CompileConstructStub(*function);
-    if (!code->IsFailure()) {
-      function->shared()->set_construct_stub(
-          Code::cast(code->ToObjectUnchecked()));
-    }
+    Handle<Code> code = compiler.CompileConstructStub(function);
+    function->shared()->set_construct_stub(*code);
   }
 }
 
@@ -8706,7 +8763,7 @@
   ASSERT(args.length() == 1);
 
   CONVERT_CHECKED(JSFunction, function, args[0]);
-  int length = function->shared()->scope_info()->NumberOfContextSlots();
+  int length = function->shared()->scope_info()->ContextLength();
   Object* result;
   { MaybeObject* maybe_result =
         isolate->heap()->AllocateFunctionContext(length, function);
@@ -8792,7 +8849,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  SerializedScopeInfo* scope_info = SerializedScopeInfo::cast(args[0]);
+  ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
   JSFunction* function;
   if (args[1]->IsSmi()) {
     // A smi sentinel indicates a context nested inside global code rather
@@ -9012,10 +9069,7 @@
   Handle<Object> value(args[0], isolate);
   CONVERT_ARG_CHECKED(Context, context, 1);
   CONVERT_ARG_CHECKED(String, name, 2);
-  CONVERT_SMI_ARG_CHECKED(strict_unchecked, 3);
-  RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
-                 strict_unchecked == kNonStrictMode);
-  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+  CONVERT_STRICT_MODE_ARG(strict_mode, 3);
 
   int index;
   PropertyAttributes attributes;
@@ -9430,90 +9484,22 @@
 
   HandleScope scope(isolate);
   Handle<Object> callee = args.at<Object>(0);
-  Handle<Object> receiver;  // Will be overwritten.
 
-  // Compute the calling context.
-  Handle<Context> context = Handle<Context>(isolate->context(), isolate);
-#ifdef DEBUG
-  // Make sure Isolate::context() agrees with the old code that traversed
-  // the stack frames to compute the context.
-  StackFrameLocator locator;
-  JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
-  ASSERT(Context::cast(frame->context()) == *context);
-#endif
-
-  // Find where the 'eval' symbol is bound. It is unaliased only if
-  // it is bound in the global context.
-  int index = -1;
-  PropertyAttributes attributes = ABSENT;
-  BindingFlags binding_flags;
-  while (true) {
-    // Don't follow context chains in Context::Lookup and implement the loop
-    // up the context chain here, so that we can know the context where eval
-    // was found.
-    receiver = context->Lookup(isolate->factory()->eval_symbol(),
-                               FOLLOW_PROTOTYPE_CHAIN,
-                               &index,
-                               &attributes,
-                               &binding_flags);
-    // Stop search when eval is found or when the global context is
-    // reached.
-    if (attributes != ABSENT || context->IsGlobalContext()) break;
-    context = Handle<Context>(context->previous(), isolate);
-  }
-
-  // If eval could not be resolved, it has been deleted and we need to
-  // throw a reference error.
-  if (attributes == ABSENT) {
-    Handle<Object> name = isolate->factory()->eval_symbol();
-    Handle<Object> reference_error =
-        isolate->factory()->NewReferenceError("not_defined",
-                                              HandleVector(&name, 1));
-    return MakePair(isolate->Throw(*reference_error), NULL);
-  }
-
-  if (!context->IsGlobalContext()) {
-    // 'eval' is not bound in the global context. Just call the function
-    // with the given arguments. This is not necessarily the global eval.
-    if (receiver->IsContext() || receiver->IsJSContextExtensionObject()) {
-      receiver = isolate->factory()->the_hole_value();
-    }
-    return MakePair(*callee, *receiver);
-  }
-
-  // 'eval' is bound in the global context, but it may have been overwritten.
-  // Compare it to the builtin 'GlobalEval' function to make sure.
+  // If "eval" didn't refer to the original GlobalEval, it's not a
+  // direct call to eval.
+  // (And even if it is a direct call, if the first argument isn't a
+  // string, just let execution default to an indirect call to eval,
+  // which will also return the first argument without doing anything.)
   if (*callee != isolate->global_context()->global_eval_fun() ||
       !args[1]->IsString()) {
     return MakePair(*callee, isolate->heap()->the_hole_value());
   }
 
-  ASSERT(args[3]->IsSmi());
+  CONVERT_STRICT_MODE_ARG(strict_mode, 3);
   return CompileGlobalEval(isolate,
                            args.at<String>(1),
                            args.at<Object>(2),
-                           static_cast<StrictModeFlag>(args.smi_at(3)));
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
-  ASSERT(args.length() == 4);
-
-  HandleScope scope(isolate);
-  Handle<Object> callee = args.at<Object>(0);
-
-  // 'eval' is bound in the global context, but it may have been overwritten.
-  // Compare it to the builtin 'GlobalEval' function to make sure.
-  if (*callee != isolate->global_context()->global_eval_fun() ||
-      !args[1]->IsString()) {
-    return MakePair(*callee, isolate->heap()->the_hole_value());
-  }
-
-  ASSERT(args[3]->IsSmi());
-  return CompileGlobalEval(isolate,
-                           args.at<String>(1),
-                           args.at<Object>(2),
-                           static_cast<StrictModeFlag>(args.smi_at(3)));
+                           strict_mode);
 }
 
 
@@ -9708,6 +9694,7 @@
   uint32_t length = static_cast<uint32_t>(array->length()->Number());
   int element_count = 0;
   switch (array->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Fast elements can't have lengths that are not representable by
       // a 32-bit signed integer.
@@ -9719,6 +9706,10 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS:
+      // TODO(1810): Decide if it's worthwhile to implement this.
+      UNREACHABLE();
+      break;
     case DICTIONARY_ELEMENTS: {
       Handle<NumberDictionary> dictionary(
           NumberDictionary::cast(array->elements()));
@@ -9731,7 +9722,16 @@
       }
       break;
     }
-    default:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
       // External arrays are always dense.
       return length;
   }
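
Spelling out every ElementsKind instead of default: is deliberate: once the switch is exhaustive, adding a new kind later makes compilers with -Wswitch flag every switch that forgot to handle it, rather than silently falling through to default. The pattern in general form:

    enum Color { kRed, kGreen, kBlue };

    int Channel(Color c) {
      switch (c) {  // no default: -Wswitch warns if a value is missed
        case kRed:   return 0;
        case kGreen: return 1;
        case kBlue:  return 2;
      }
      return -1;  // unreachable; keeps -Wreturn-type quiet
    }
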
@@ -9809,6 +9809,11 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      // TODO(1810): Decide if it's worthwhile to implement this.
+      UNREACHABLE();
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       Handle<NumberDictionary> dict(NumberDictionary::cast(object->elements()));
       uint32_t capacity = dict->Capacity();
@@ -9939,6 +9944,11 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      // TODO(1810): Decide if it's worthwhile to implement this.
+      UNREACHABLE();
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       Handle<NumberDictionary> dict(receiver->element_dictionary());
       List<uint32_t> indices(dict->Capacity() / 2);
@@ -10049,6 +10059,13 @@
       uint32_t element_estimate;
       if (obj->IsJSArray()) {
         Handle<JSArray> array(Handle<JSArray>::cast(obj));
+        // TODO(1810): Find out if it's worthwhile to properly support
+        // arbitrary ElementsKinds. For now, pessimistically transition to
+        // FAST_ELEMENTS.
+        if (array->HasFastDoubleElements()) {
+          array = Handle<JSArray>::cast(
+              TransitionElementsKind(array, FAST_ELEMENTS));
+        }
         length_estimate =
             static_cast<uint32_t>(array->length()->Number());
         element_estimate =
@@ -10699,6 +10716,18 @@
 static const int kFrameDetailsFlagsIndex = 8;
 static const int kFrameDetailsFirstDynamicIndex = 9;
 
+
+static SaveContext* FindSavedContextForFrame(Isolate* isolate,
+                                             JavaScriptFrame* frame) {
+  SaveContext* save = isolate->save_context();
+  while (save != NULL && !save->IsBelowFrame(frame)) {
+    save = save->prev();
+  }
+  ASSERT(save != NULL);
+  return save;
+}
+
+
 // Return an array with frame details
 // args[0]: number: break id
 // args[1]: number: frame index
@@ -10754,11 +10783,7 @@
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
-  SaveContext* save = isolate->save_context();
-  while (save != NULL && !save->below(it.frame())) {
-    save = save->prev();
-  }
-  ASSERT(save != NULL);
+  SaveContext* save = FindSavedContextForFrame(isolate, it.frame());
 
   // Get the frame id.
   Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
@@ -10775,9 +10800,8 @@
   // Get scope info and read from it for local variable information.
   Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<SerializedScopeInfo> scope_info(shared->scope_info());
-  ASSERT(*scope_info != SerializedScopeInfo::Empty());
-  ScopeInfo<> info(*scope_info);
+  Handle<ScopeInfo> scope_info(shared->scope_info());
+  ASSERT(*scope_info != ScopeInfo::Empty());
 
   // Get the locals names and values into a temporary array.
   //
@@ -10785,24 +10809,26 @@
   // (e.g. .result)?  For users of the debugger, they will probably be
   // confusing.
   Handle<FixedArray> locals =
-      isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
+      isolate->factory()->NewFixedArray(scope_info->LocalCount() * 2);
 
   // Fill in the values of the locals.
   int i = 0;
-  for (; i < info.number_of_stack_slots(); ++i) {
+  for (; i < scope_info->StackLocalCount(); ++i) {
     // Use the value from the stack.
-    locals->set(i * 2, *info.LocalName(i));
+    locals->set(i * 2, scope_info->LocalName(i));
     locals->set(i * 2 + 1, frame_inspector.GetExpression(i));
   }
-  if (i < info.NumberOfLocals()) {
+  if (i < scope_info->LocalCount()) {
     // Get the context containing declarations.
     Handle<Context> context(
         Context::cast(it.frame()->context())->declaration_context());
-    for (; i < info.NumberOfLocals(); ++i) {
-      Handle<String> name = info.LocalName(i);
+    for (; i < scope_info->LocalCount(); ++i) {
+      Handle<String> name(scope_info->LocalName(i));
+      VariableMode mode;
+      InitializationFlag init_flag;
       locals->set(i * 2, *name);
-      locals->set(i * 2 + 1,
-                  context->get(scope_info->ContextSlotIndex(*name, NULL)));
+      locals->set(i * 2 + 1, context->get(
+          scope_info->ContextSlotIndex(*name, &mode, &init_flag)));
     }
   }
 
@@ -10856,7 +10882,7 @@
 
   // Find the number of arguments to fill. At least fill the number of
   // parameters for the function and fill more if more parameters are provided.
-  int argument_count = info.number_of_parameters();
+  int argument_count = scope_info->ParameterCount();
   if (argument_count < frame_inspector.GetParametersCount()) {
     argument_count = frame_inspector.GetParametersCount();
   }
@@ -10868,7 +10894,7 @@
 
   // Calculate the size of the result.
   int details_size = kFrameDetailsFirstDynamicIndex +
-                     2 * (argument_count + info.NumberOfLocals()) +
+                     2 * (argument_count + scope_info->LocalCount()) +
                      (at_return ? 1 : 0);
   Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
 
@@ -10883,7 +10909,7 @@
 
   // Add the locals count
   details->set(kFrameDetailsLocalCountIndex,
-               Smi::FromInt(info.NumberOfLocals()));
+               Smi::FromInt(scope_info->LocalCount()));
 
   // Add the source position.
   if (position != RelocInfo::kNoPosition) {
@@ -10918,8 +10944,8 @@
   // Add arguments name and value.
   for (int i = 0; i < argument_count; i++) {
     // Name of the argument.
-    if (i < info.number_of_parameters()) {
-      details->set(details_index++, *info.parameter_name(i));
+    if (i < scope_info->ParameterCount()) {
+      details->set(details_index++, scope_info->ParameterName(i));
     } else {
       details->set(details_index++, heap->undefined_value());
     }
@@ -10934,7 +10960,7 @@
   }
 
   // Add locals name and value from the temporary copy from the function frame.
-  for (int i = 0; i < info.NumberOfLocals() * 2; i++) {
+  for (int i = 0; i < scope_info->LocalCount() * 2; i++) {
     details->set(details_index++, locals->get(i));
   }
 
@@ -10970,21 +10996,20 @@
 // Copy all the context locals into an object used to materialize a scope.
 static bool CopyContextLocalsToScopeObject(
     Isolate* isolate,
-    Handle<SerializedScopeInfo> serialized_scope_info,
-    ScopeInfo<>& scope_info,
+    Handle<ScopeInfo> scope_info,
     Handle<Context> context,
     Handle<JSObject> scope_object) {
   // Fill all context locals to the context extension.
-  for (int i = Context::MIN_CONTEXT_SLOTS;
-       i < scope_info.number_of_context_slots();
-       i++) {
-    int context_index = serialized_scope_info->ContextSlotIndex(
-        *scope_info.context_slot_name(i), NULL);
+  for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+    VariableMode mode;
+    InitializationFlag init_flag;
+    int context_index = scope_info->ContextSlotIndex(
+        scope_info->ContextLocalName(i), &mode, &init_flag);
 
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(scope_object,
-                    scope_info.context_slot_name(i),
+                    Handle<String>(scope_info->ContextLocalName(i)),
                     Handle<Object>(context->get(context_index), isolate),
                     NONE,
                     kNonStrictMode),
@@ -11003,8 +11028,7 @@
     int inlined_frame_index) {
   Handle<JSFunction> function(JSFunction::cast(frame->function()));
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
-  ScopeInfo<> scope_info(*serialized_scope_info);
+  Handle<ScopeInfo> scope_info(shared->scope_info());
   FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
@@ -11013,11 +11037,11 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // First fill all parameters.
-  for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+  for (int i = 0; i < scope_info->ParameterCount(); ++i) {
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(local_scope,
-                    scope_info.parameter_name(i),
+                    Handle<String>(scope_info->ParameterName(i)),
                     Handle<Object>(frame_inspector.GetParameter(i)),
                     NONE,
                     kNonStrictMode),
@@ -11025,24 +11049,23 @@
   }
 
   // Second fill all stack locals.
-  for (int i = 0; i < scope_info.number_of_stack_slots(); ++i) {
+  for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(local_scope,
-                    scope_info.stack_slot_name(i),
+                    Handle<String>(scope_info->StackLocalName(i)),
                     Handle<Object>(frame_inspector.GetExpression(i)),
                     NONE,
                     kNonStrictMode),
         Handle<JSObject>());
   }
 
-  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+  if (scope_info->HasContext()) {
     // Third fill all context locals.
     Handle<Context> frame_context(Context::cast(frame->context()));
     Handle<Context> function_context(frame_context->declaration_context());
-    if (!CopyContextLocalsToScopeObject(isolate,
-                                        serialized_scope_info, scope_info,
-                                        function_context, local_scope)) {
+    if (!CopyContextLocalsToScopeObject(
+            isolate, scope_info, function_context, local_scope)) {
       return Handle<JSObject>();
     }
 
@@ -11085,8 +11108,7 @@
   ASSERT(context->IsFunctionContext());
 
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
-  Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
-  ScopeInfo<> scope_info(*serialized_scope_info);
+  Handle<ScopeInfo> scope_info(shared->scope_info());
 
   // Allocate and initialize a JSObject with all the content of this function
   // closure.
@@ -11094,9 +11116,8 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // Fill all context locals to the context extension.
-  if (!CopyContextLocalsToScopeObject(isolate,
-                                      serialized_scope_info, scope_info,
-                                      context, closure_scope)) {
+  if (!CopyContextLocalsToScopeObject(
+          isolate, scope_info, context, closure_scope)) {
     return Handle<JSObject>();
   }
 
@@ -11151,9 +11172,7 @@
     Isolate* isolate,
     Handle<Context> context) {
   ASSERT(context->IsBlockContext());
-  Handle<SerializedScopeInfo> serialized_scope_info(
-      SerializedScopeInfo::cast(context->extension()));
-  ScopeInfo<> scope_info(*serialized_scope_info);
+  Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
   // heap locals and extension properties of the debugged function.
@@ -11161,12 +11180,9 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // Fill all context locals.
-  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
-    if (!CopyContextLocalsToScopeObject(isolate,
-                                        serialized_scope_info, scope_info,
-                                        context, block_scope)) {
-      return Handle<JSObject>();
-    }
+  if (!CopyContextLocalsToScopeObject(
+          isolate, scope_info, context, block_scope)) {
+    return Handle<JSObject>();
   }
 
   return block_scope;
@@ -11223,18 +11239,14 @@
       // Global code
       CompilationInfo info(script);
       info.MarkAsGlobal();
-      bool result = ParserApi::Parse(&info);
-      ASSERT(result);
-      result = Scope::Analyze(&info);
-      ASSERT(result);
+      CHECK(ParserApi::Parse(&info));
+      CHECK(Scope::Analyze(&info));
       scope = info.function()->scope();
     } else {
       // Function code
       CompilationInfo info(shared_info);
-      bool result = ParserApi::Parse(&info);
-      ASSERT(result);
-      result = Scope::Analyze(&info);
-      ASSERT(result);
+      CHECK(ParserApi::Parse(&info));
+      CHECK(Scope::Analyze(&info));
       scope = info.function()->scope();
     }
 
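The CHECK conversion is more than tidying: ASSERT compiles away in release builds, so the old bool result variables were unused there and a failed parse would have gone undetected outside debug mode. CHECK evaluates and verifies its argument in every build. An illustration with generic stand-in macros (V8's real definitions differ in detail):

    #include <stdio.h>
    #include <stdlib.h>

    #define CHECK(cond)                                          \
      do {                                                       \
        if (!(cond)) {                                           \
          fprintf(stderr, "CHECK failed: %s\n", #cond);          \
          abort();                                               \
        }                                                        \
      } while (0)

    #ifdef DEBUG
    #define ASSERT(cond) CHECK(cond)
    #else
    #define ASSERT(cond) ((void) 0)  // not even evaluated
    #endif

    bool Parse() { return true; }  // stand-in for ParserApi::Parse

    int main() {
      bool result = Parse();
      ASSERT(result);  // release: |result| unused, failure invisible
      CHECK(Parse());  // every build: runs Parse() and verifies it
      return 0;
    }
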
@@ -11269,7 +11281,7 @@
   // Return the type of the current scope.
   ScopeType Type() {
     if (!nested_scope_chain_.is_empty()) {
-      Handle<SerializedScopeInfo> scope_info = nested_scope_chain_.last();
+      Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
       switch (scope_info->Type()) {
         case FUNCTION_SCOPE:
           ASSERT(context_->IsFunctionContext() ||
@@ -11333,17 +11345,15 @@
     return Handle<JSObject>();
   }
 
-  Handle<SerializedScopeInfo> CurrentScopeInfo() {
+  Handle<ScopeInfo> CurrentScopeInfo() {
     if (!nested_scope_chain_.is_empty()) {
       return nested_scope_chain_.last();
     } else if (context_->IsBlockContext()) {
-      return Handle<SerializedScopeInfo>(
-          SerializedScopeInfo::cast(context_->extension()));
+      return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
     } else if (context_->IsFunctionContext()) {
-      return Handle<SerializedScopeInfo>(
-          context_->closure()->shared()->scope_info());
+      return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
     }
-    return Handle<SerializedScopeInfo>::null();
+    return Handle<ScopeInfo>::null();
   }
 
   // Return the context for this scope. For the local context there might not
@@ -11370,8 +11380,7 @@
 
       case ScopeIterator::ScopeTypeLocal: {
         PrintF("Local:\n");
-        ScopeInfo<> scope_info(function_->shared()->scope_info());
-        scope_info.Print();
+        function_->shared()->scope_info()->Print();
         if (!CurrentContext().is_null()) {
           CurrentContext()->Print();
           if (CurrentContext()->has_extension()) {
@@ -11419,7 +11428,7 @@
   int inlined_frame_index_;
   Handle<JSFunction> function_;
   Handle<Context> context_;
-  List<Handle<SerializedScopeInfo> > nested_scope_chain_;
+  List<Handle<ScopeInfo> > nested_scope_chain_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
 };
@@ -11888,7 +11897,7 @@
                                                    JavaScriptFrame* frame,
                                                    int inlined_frame_index) {
   HandleScope scope(isolate);
-  List<Handle<SerializedScopeInfo> > scope_chain;
+  List<Handle<ScopeInfo> > scope_chain;
   List<Handle<Context> > context_chain;
 
   ScopeIterator it(isolate, frame, inlined_frame_index);
@@ -11904,7 +11913,7 @@
 
   // Iteratively copy and or materialize the nested contexts.
   while (!scope_chain.is_empty()) {
-    Handle<SerializedScopeInfo> scope_info = scope_chain.RemoveLast();
+    Handle<ScopeInfo> scope_info = scope_chain.RemoveLast();
     Handle<Context> current = context_chain.RemoveLast();
     ASSERT(!(scope_info->HasContext() & current.is_null()));
 
@@ -11950,23 +11959,24 @@
                                          JavaScriptFrame* frame,
                                          int inlined_frame_index,
                                          Handle<JSFunction> function,
-                                         Handle<SerializedScopeInfo> scope_info,
-                                         const ScopeInfo<>* sinfo,
+                                         Handle<ScopeInfo> scope_info,
                                          Handle<Context> function_context) {
   // Try to find the value of 'arguments' to pass as parameter. If it is not
   // found (that is the debugged function does not reference 'arguments' and
   // does not support eval) then create an 'arguments' object.
   int index;
-  if (sinfo->number_of_stack_slots() > 0) {
+  if (scope_info->StackLocalCount() > 0) {
     index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
     if (index != -1) {
       return Handle<Object>(frame->GetExpression(index), isolate);
     }
   }
 
-  if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
-    index = scope_info->ContextSlotIndex(isolate->heap()->arguments_symbol(),
-                                         NULL);
+  if (scope_info->HasHeapAllocatedLocals()) {
+    VariableMode mode;
+    InitializationFlag init_flag;
+    index = scope_info->ContextSlotIndex(
+        isolate->heap()->arguments_symbol(), &mode, &init_flag);
     if (index != -1) {
       return Handle<Object>(function_context->get(index), isolate);
     }
@@ -12031,16 +12041,12 @@
   JavaScriptFrameIterator it(isolate, id);
   JavaScriptFrame* frame = it.frame();
   Handle<JSFunction> function(JSFunction::cast(frame->function()));
-  Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
-  ScopeInfo<> sinfo(*scope_info);
+  Handle<ScopeInfo> scope_info(function->shared()->scope_info());
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
-  SaveContext* save = isolate->save_context();
-  while (save != NULL && !save->below(frame)) {
-    save = save->prev();
-  }
-  ASSERT(save != NULL);
+  SaveContext* save = FindSavedContextForFrame(isolate, frame);
+
   SaveContext savex(isolate);
   isolate->set_context(*(save->context()));
 
@@ -12055,9 +12061,9 @@
                                       isolate->factory()->undefined_value());
   go_between->set_context(function->context());
 #ifdef DEBUG
-  ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
-  ASSERT(go_between_sinfo.number_of_parameters() == 0);
-  ASSERT(go_between_sinfo.number_of_context_slots() == 0);
+  Handle<ScopeInfo> go_between_scope_info(go_between->shared()->scope_info());
+  ASSERT(go_between_scope_info->ParameterCount() == 0);
+  ASSERT(go_between_scope_info->ContextLocalCount() == 0);
 #endif
 
   // Materialize the content of the local scope into a JSObject.
@@ -12075,7 +12081,7 @@
   Handle<Context> frame_context(Context::cast(frame->context()));
   Handle<Context> function_context;
   // Get the function's context if it has one.
-  if (scope_info->HasHeapAllocatedLocals()) {
+  if (scope_info->HasContext()) {
     function_context = Handle<Context>(frame_context->declaration_context());
   }
   context = CopyNestedScopeContextChain(isolate,
@@ -12120,9 +12126,11 @@
   if (has_pending_exception) return Failure::Exception();
 
   Handle<Object> arguments = GetArgumentsObject(isolate,
-                                                frame, inlined_frame_index,
-                                                function, scope_info,
-                                                &sinfo, function_context);
+                                                frame,
+                                                inlined_frame_index,
+                                                function,
+                                                scope_info,
+                                                function_context);
 
   // Invoke the evaluation function and return the result.
   Handle<Object> argv[] = { arguments, source };
diff --git a/src/runtime.h b/src/runtime.h
index 67fc628..c411b30 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -98,6 +98,7 @@
   F(SetNewFunctionAttributes, 1, 1) \
   F(AllocateInNewSpace, 1, 1) \
   F(SetNativeFlag, 1, 1) \
+  F(StoreArrayLiteralElement, 5, 1) \
   \
   /* Array join support */ \
   F(PushIfAbsent, 2, 1) \
@@ -246,7 +247,7 @@
   F(DateLocalTimezone, 1, 1) \
   F(DateLocalTimeOffset, 0, 1) \
   F(DateDaylightSavingsOffset, 1, 1) \
-  F(DateMakeDay, 3, 1) \
+  F(DateMakeDay, 2, 1) \
   F(DateYMDFromTime, 2, 1) \
   \
   /* Numbers */ \
@@ -258,7 +259,6 @@
   /* Eval */ \
   F(GlobalReceiver, 1, 1) \
   F(ResolvePossiblyDirectEval, 4, 2) \
-  F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
   \
   F(SetProperty, -1 /* 4 or 5 */, 1) \
   F(DefineOrRedefineDataProperty, 4, 1) \
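
Each F(name, nargs, result_size) line declares one runtime function: the argument count (-1 for variadic, like SetProperty above) and how many values it returns (2 for the ObjectPair that ResolvePossiblyDirectEval produces). The renumbered DateMakeDay entry has to match the ASSERT(args.length() == 2) in its implementation. A toy expansion of the table idiom, hedged since V8's real macro machinery is more involved:

    // Hypothetical expansion: turn F(...) entries into descriptors.
    struct RuntimeEntry {
      const char* name;
      int nargs;        // -1 means variadic
      int result_size;  // 2 => pair-returning function
    };

    #define F(name, nargs, ressize) { #name, nargs, ressize },
    static const RuntimeEntry kEntries[] = {
      F(DateMakeDay, 2, 1)
      F(StoreArrayLiteralElement, 5, 1)
      F(ResolvePossiblyDirectEval, 4, 2)
    };
    #undef F
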
diff --git a/src/scanner.cc b/src/scanner.cc
index 95748f2..01fe81c 100644
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -36,29 +36,25 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// Scanner::LiteralScope
-
-Scanner::LiteralScope::LiteralScope(Scanner* self)
-    : scanner_(self), complete_(false) {
-  self->StartLiteral();
-}
-
-
-Scanner::LiteralScope::~LiteralScope() {
-  if (!complete_) scanner_->DropLiteral();
-}
-
-
-void Scanner::LiteralScope::Complete() {
-  scanner_->TerminateLiteral();
-  complete_ = true;
-}
-
-// ----------------------------------------------------------------------------
 // Scanner
 
 Scanner::Scanner(UnicodeCache* unicode_cache)
-    : unicode_cache_(unicode_cache) { }
+    : unicode_cache_(unicode_cache),
+      octal_pos_(Location::invalid()),
+      harmony_scoping_(false) { }
+
+
+void Scanner::Initialize(UC16CharacterStream* source) {
+  source_ = source;
+  // Need to capture identifiers in order to recognize "get" and "set"
+  // in object literals.
+  Init();
+  // Skip initial whitespace allowing HTML comment ends just like
+  // after a newline and scan first token.
+  has_line_terminator_before_next_ = true;
+  SkipWhiteSpace();
+  Scan();
+}
 
 
 uc32 Scanner::ScanHexNumber(int expected_length) {
@@ -88,29 +84,6 @@
 }
 
 
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner
-
-JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_contants)
-    : Scanner(scanner_contants),
-      octal_pos_(Location::invalid()),
-      harmony_scoping_(false) { }
-
-
-void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
-  source_ = source;
-  // Need to capture identifiers in order to recognize "get" and "set"
-  // in object literals.
-  Init();
-  // Skip initial whitespace allowing HTML comment ends just like
-  // after a newline and scan first token.
-  has_line_terminator_before_next_ = true;
-  SkipWhiteSpace();
-  Scan();
-}
-
-
 // Ensure that tokens can be stored in a byte.
 STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
 
@@ -247,7 +220,7 @@
 };
 
 
-Token::Value JavaScriptScanner::Next() {
+Token::Value Scanner::Next() {
   current_ = next_;
   has_line_terminator_before_next_ = false;
   has_multiline_comment_before_next_ = false;
@@ -279,7 +252,7 @@
 }
 
 
-bool JavaScriptScanner::SkipWhiteSpace() {
+bool Scanner::SkipWhiteSpace() {
   int start_position = source_pos();
 
   while (true) {
@@ -319,7 +292,7 @@
 }
 
 
-Token::Value JavaScriptScanner::SkipSingleLineComment() {
+Token::Value Scanner::SkipSingleLineComment() {
   Advance();
 
   // The line terminator at the end of the line is not considered
@@ -335,7 +308,7 @@
 }
 
 
-Token::Value JavaScriptScanner::SkipMultiLineComment() {
+Token::Value Scanner::SkipMultiLineComment() {
   ASSERT(c0_ == '*');
   Advance();
 
@@ -361,7 +334,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanHtmlComment() {
+Token::Value Scanner::ScanHtmlComment() {
   // Check for <!-- comments.
   ASSERT(c0_ == '!');
   Advance();
@@ -376,7 +349,7 @@
 }
 
 
-void JavaScriptScanner::Scan() {
+void Scanner::Scan() {
   next_.literal_chars = NULL;
   Token::Value token;
   do {
@@ -616,7 +589,7 @@
 }
 
 
-void JavaScriptScanner::SeekForward(int pos) {
+void Scanner::SeekForward(int pos) {
   // After this call, we will have the token at the given position as
   // the "next" token. The "current" token will be invalid.
   if (pos == next_.location.beg_pos) return;
@@ -637,7 +610,7 @@
 }
 
 
-void JavaScriptScanner::ScanEscape() {
+void Scanner::ScanEscape() {
   uc32 c = c0_;
   Advance();
 
@@ -689,7 +662,7 @@
 
 // Octal escapes of the forms '\0xx' and '\xxx' are not a part of
 // ECMA-262. Other JS VMs support them.
-uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
+uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
   uc32 x = c - '0';
   int i = 0;
   for (; i < length; i++) {
@@ -712,7 +685,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanString() {
+Token::Value Scanner::ScanString() {
   uc32 quote = c0_;
   Advance();  // consume quote
 
@@ -736,13 +709,13 @@
 }
 
 
-void JavaScriptScanner::ScanDecimalDigits() {
+void Scanner::ScanDecimalDigits() {
   while (IsDecimalDigit(c0_))
     AddLiteralCharAdvance();
 }
 
 
-Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
+Token::Value Scanner::ScanNumber(bool seen_period) {
   ASSERT(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
 
   enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
@@ -827,7 +800,7 @@
 }
 
 
-uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
+uc32 Scanner::ScanIdentifierUnicodeEscape() {
   Advance();
   if (c0_ != 'u') return -1;
   Advance();
@@ -944,7 +917,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
+Token::Value Scanner::ScanIdentifierOrKeyword() {
   ASSERT(unicode_cache_->IsIdentifierStart(c0_));
   LiteralScope literal(this);
   // Scan identifier start character.
@@ -989,7 +962,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
+Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
   // Scan the rest of the identifier characters.
   while (unicode_cache_->IsIdentifierPart(c0_)) {
     if (c0_ == '\\') {
@@ -1012,7 +985,7 @@
 }
 
 
-bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
+bool Scanner::ScanRegExpPattern(bool seen_equal) {
   // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
   bool in_character_class = false;
 
@@ -1059,7 +1032,7 @@
 }
 
 
-bool JavaScriptScanner::ScanLiteralUnicodeEscape() {
+bool Scanner::ScanLiteralUnicodeEscape() {
   ASSERT(c0_ == '\\');
   uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0};
   Advance();
@@ -1089,7 +1062,7 @@
 }
 
 
-bool JavaScriptScanner::ScanRegExpFlags() {
+bool Scanner::ScanRegExpFlags() {
   // Scan regular expression flags.
   LiteralScope literal(this);
   while (unicode_cache_->IsIdentifierPart(c0_)) {
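
With JavaScriptScanner folded into Scanner, the class is organized around a current_/next_ token pair: peek() inspects next_ without consuming it, and Next() promotes next_ to current_ and scans one token ahead, which is why Initialize() above ends by calling Scan(). A minimal stand-alone sketch of that one-token-lookahead shape (the token set and the Scan() body are placeholders):

    #include <string>

    enum Token { kIdent, kNumber, kEOF };

    class TinyScanner {
     public:
      explicit TinyScanner(const std::string& src)
          : src_(src), pos_(0), current_(kEOF), next_(kEOF) {
        next_ = Scan();  // prime the lookahead, like Initialize()
      }
      Token peek() const { return next_; }  // look ahead, don't consume
      Token Next() {                        // consume and advance
        current_ = next_;
        next_ = Scan();
        return current_;
      }

     private:
      Token Scan() {  // placeholder tokenizer: numbers vs. identifiers
        while (pos_ < src_.size() && src_[pos_] == ' ') ++pos_;
        if (pos_ >= src_.size()) return kEOF;
        bool digit = src_[pos_] >= '0' && src_[pos_] <= '9';
        while (pos_ < src_.size() && src_[pos_] != ' ') ++pos_;
        return digit ? kNumber : kIdent;
      }
      std::string src_;
      size_t pos_;
      Token current_;
      Token next_;
    };
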
diff --git a/src/scanner.h b/src/scanner.h
index a2e64a9..88e3bce 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -169,7 +169,7 @@
     }
   }
 
-  inline void AddChar(uc16 character) {
+  INLINE(void AddChar(uc16 character)) {
     if (position_ >= backing_store_.length()) ExpandBuffer();
     if (is_ascii_) {
       if (character < kMaxAsciiCharCodeU) {
@@ -260,35 +260,32 @@
 
 
 // ----------------------------------------------------------------------------
-// Scanner base-class.
+// JavaScript Scanner.
 
-// Generic functionality used by both JSON and JavaScript scanners.
 class Scanner {
  public:
-  // -1 is outside of the range of any real source code.
-  static const int kNoOctalLocation = -1;
-
-  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
+  // Scoped helper for literal recording. Automatically drops the literal
+  // if scanning is aborted before the literal is complete.
   class LiteralScope {
    public:
-    explicit LiteralScope(Scanner* self);
-    ~LiteralScope();
-    void Complete();
+    explicit LiteralScope(Scanner* self)
+        : scanner_(self), complete_(false) {
+      scanner_->StartLiteral();
+    }
+    ~LiteralScope() {
+      if (!complete_) scanner_->DropLiteral();
+    }
+    void Complete() {
+      scanner_->TerminateLiteral();
+      complete_ = true;
+    }
 
    private:
     Scanner* scanner_;
     bool complete_;
   };
 
-  explicit Scanner(UnicodeCache* scanner_contants);
-
-  // Returns the current token again.
-  Token::Value current_token() { return current_.token; }
-
-  // One token look-ahead (past the token returned by Next()).
-  Token::Value peek() const { return next_.token; }
-
+  // Representation of an interval of source positions.
   struct Location {
     Location(int b, int e) : beg_pos(b), end_pos(e) { }
     Location() : beg_pos(0), end_pos(0) { }
@@ -303,21 +300,28 @@
     int end_pos;
   };
 
-  // Returns the location information for the current token
-  // (the token returned by Next()).
-  Location location() const { return current_.location; }
-  Location peek_location() const { return next_.location; }
+  // -1 is outside of the range of any real source code.
+  static const int kNoOctalLocation = -1;
 
+  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+
+  explicit Scanner(UnicodeCache* scanner_constants);
+
+  void Initialize(UC16CharacterStream* source);
+
+  // Returns the next token and advances input.
+  Token::Value Next();
+  // Returns the current token again.
+  Token::Value current_token() { return current_.token; }
+  // Returns the location information for the current token
+  // (the token last returned by Next()).
+  Location location() const { return current_.location; }
   // Returns the literal string, if any, for the current token (the
-  // token returned by Next()). The string is 0-terminated and in
-  // UTF-8 format; they may contain 0-characters. Literal strings are
-  // collected for identifiers, strings, and numbers.
+  // token last returned by Next()). The string is 0-terminated.
+  // Literal strings are collected for identifiers, strings, and
+  // numbers.
   // These functions only give the correct result if the literal
   // was scanned between calls to StartLiteral() and TerminateLiteral().
-  bool is_literal_ascii() {
-    ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->is_ascii();
-  }
   Vector<const char> literal_ascii_string() {
     ASSERT_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->ascii_literal();
@@ -326,6 +330,10 @@
     ASSERT_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->uc16_literal();
   }
+  bool is_literal_ascii() {
+    ASSERT_NOT_NULL(current_.literal_chars);
+    return current_.literal_chars->is_ascii();
+  }
   int literal_length() const {
     ASSERT_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->length();
@@ -341,12 +349,15 @@
     return current_.literal_chars->length() != source_length;
   }
 
+  // Similar functions for the upcoming token.
+
+  // One token look-ahead (past the token returned by Next()).
+  Token::Value peek() const { return next_.token; }
+
+  Location peek_location() const { return next_.location; }
+
   // Returns the literal string for the next token (the token that
   // would be returned if Next() were called).
-  bool is_next_literal_ascii() {
-    ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->is_ascii();
-  }
   Vector<const char> next_literal_ascii_string() {
     ASSERT_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->ascii_literal();
@@ -355,6 +366,10 @@
     ASSERT_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->uc16_literal();
   }
+  bool is_next_literal_ascii() {
+    ASSERT_NOT_NULL(next_.literal_chars);
+    return next_.literal_chars->is_ascii();
+  }
   int next_literal_length() const {
     ASSERT_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->length();
@@ -364,7 +379,46 @@
 
   static const int kCharacterLookaheadBufferSize = 1;
 
- protected:
+  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+  uc32 ScanOctalEscape(uc32 c, int length);
+
+  // Returns the location of the last seen octal literal.
+  Location octal_position() const { return octal_pos_; }
+  void clear_octal_position() { octal_pos_ = Location::invalid(); }
+
+  // Seek forward to the given position. This operation does not
+  // work in general, for instance when there are pushed-back
+  // characters, but it does work for seeking forward to simple
+  // delimiter tokens, which is what it is used for.
+  void SeekForward(int pos);
+
+  bool HarmonyScoping() const {
+    return harmony_scoping_;
+  }
+  void SetHarmonyScoping(bool block_scoping) {
+    harmony_scoping_ = block_scoping;
+  }
+
+  // Returns true if there was a line terminator before the peeked token,
+  // possibly inside a multi-line comment.
+  bool HasAnyLineTerminatorBeforeNext() const {
+    return has_line_terminator_before_next_ ||
+           has_multiline_comment_before_next_;
+  }
+
+  // Scans the input as a regular expression pattern; the previous
+  // character(s) must have been '/' (or '/='). Returns true if a
+  // pattern is scanned.
+  bool ScanRegExpPattern(bool seen_equal);
+  // Returns true if regexp flags are scanned (always since flags can
+  // be empty).
+  bool ScanRegExpFlags();
+
+  // Tells whether the buffer contains an identifier (no escapes).
+  // Used for checking if a property name is an identifier.
+  static bool IsIdentifier(unibrow::CharacterStream* buffer);
+
+ private:
   // The current and look-ahead token.
   struct TokenDesc {
     Token::Value token;
@@ -389,7 +443,7 @@
     next_.literal_chars = free_buffer;
   }
 
-  inline void AddLiteralChar(uc32 c) {
+  INLINE(void AddLiteralChar(uc32 c)) {
     ASSERT_NOT_NULL(next_.literal_chars);
     next_.literal_chars->AddChar(c);
   }
@@ -434,6 +488,31 @@
 
   uc32 ScanHexNumber(int expected_length);
 
+  // Scans a single JavaScript token.
+  void Scan();
+
+  bool SkipWhiteSpace();
+  Token::Value SkipSingleLineComment();
+  Token::Value SkipMultiLineComment();
+  // Scans a possible HTML comment -- begins with '<!'.
+  Token::Value ScanHtmlComment();
+
+  void ScanDecimalDigits();
+  Token::Value ScanNumber(bool seen_period);
+  Token::Value ScanIdentifierOrKeyword();
+  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
+
+  void ScanEscape();
+  Token::Value ScanString();
+
+  // Decodes a unicode escape-sequence which is part of an identifier.
+  // If the escape sequence cannot be decoded the result is kBadChar.
+  uc32 ScanIdentifierUnicodeEscape();
+  // Recognizes a unicode escape-sequence and adds its characters,
+  // uninterpreted, to the current literal. Used for parsing RegExp
+  // flags.
+  bool ScanLiteralUnicodeEscape();
+
   // Return the current source position.
   int source_pos() {
     return source_->pos() - kCharacterLookaheadBufferSize;
@@ -451,113 +530,13 @@
   // Input stream. Must be initialized to an UC16CharacterStream.
   UC16CharacterStream* source_;
 
-  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
-  uc32 c0_;
-};
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner - base logic for JavaScript scanning.
-
-class JavaScriptScanner : public Scanner {
- public:
-  // A LiteralScope that disables recording of some types of JavaScript
-  // literals. If the scanner is configured to not record the specific
-  // type of literal, the scope will not call StartLiteral.
-  class LiteralScope {
-   public:
-    explicit LiteralScope(JavaScriptScanner* self)
-        : scanner_(self), complete_(false) {
-      scanner_->StartLiteral();
-    }
-     ~LiteralScope() {
-       if (!complete_) scanner_->DropLiteral();
-     }
-    void Complete() {
-      scanner_->TerminateLiteral();
-      complete_ = true;
-    }
-
-   private:
-    JavaScriptScanner* scanner_;
-    bool complete_;
-  };
-
-  explicit JavaScriptScanner(UnicodeCache* scanner_contants);
-
-  void Initialize(UC16CharacterStream* source);
-
-  // Returns the next token.
-  Token::Value Next();
-
-  // Returns true if there was a line terminator before the peek'ed token,
-  // possibly inside a multi-line comment.
-  bool HasAnyLineTerminatorBeforeNext() const {
-    return has_line_terminator_before_next_ ||
-           has_multiline_comment_before_next_;
-  }
-
-  // Scans the input as a regular expression pattern, previous
-  // character(s) must be /(=). Returns true if a pattern is scanned.
-  bool ScanRegExpPattern(bool seen_equal);
-  // Returns true if regexp flags are scanned (always since flags can
-  // be empty).
-  bool ScanRegExpFlags();
-
-  // Tells whether the buffer contains an identifier (no escapes).
-  // Used for checking if a property name is an identifier.
-  static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
-  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
-  uc32 ScanOctalEscape(uc32 c, int length);
-
-  // Returns the location of the last seen octal literal
-  Location octal_position() const { return octal_pos_; }
-  void clear_octal_position() { octal_pos_ = Location::invalid(); }
-
-  // Seek forward to the given position.  This operation does not
-  // work in general, for instance when there are pushed back
-  // characters, but works for seeking forward until simple delimiter
-  // tokens, which is what it is used for.
-  void SeekForward(int pos);
-
-  bool HarmonyScoping() const {
-    return harmony_scoping_;
-  }
-  void SetHarmonyScoping(bool block_scoping) {
-    harmony_scoping_ = block_scoping;
-  }
-
-
- protected:
-  bool SkipWhiteSpace();
-  Token::Value SkipSingleLineComment();
-  Token::Value SkipMultiLineComment();
-
-  // Scans a single JavaScript token.
-  void Scan();
-
-  void ScanDecimalDigits();
-  Token::Value ScanNumber(bool seen_period);
-  Token::Value ScanIdentifierOrKeyword();
-  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
-
-  void ScanEscape();
-  Token::Value ScanString();
-
-  // Scans a possible HTML comment -- begins with '<!'.
-  Token::Value ScanHtmlComment();
-
-  // Decodes a unicode escape-sequence which is part of an identifier.
-  // If the escape sequence cannot be decoded the result is kBadChar.
-  uc32 ScanIdentifierUnicodeEscape();
-  // Recognizes a uniocde escape-sequence and adds its characters,
-  // uninterpreted, to the current literal. Used for parsing RegExp
-  // flags.
-  bool ScanLiteralUnicodeEscape();
 
   // Start position of the octal literal last scanned.
   Location octal_pos_;
 
+  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+  uc32 c0_;
+
   // Whether there is a line terminator whitespace character after
   // the current token, and before the next. Does not count newlines
   // inside multiline comments.
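
The LiteralScope helper now lives directly on Scanner. A scanning routine
records literal characters inside such a scope and calls Complete() on
success; if it bails out early, the destructor drops the partial literal.
A sketch of the pattern (hypothetical ScanFlagsLike(), modeled on the
ScanRegExpFlags() body in scanner.cc above):

    bool Scanner::ScanFlagsLike() {   // hypothetical, for illustration only
      LiteralScope literal(this);     // constructor calls StartLiteral()
      while (unicode_cache_->IsIdentifierPart(c0_)) {
        AddLiteralChar(c0_);          // record into next_.literal_chars
        Advance();                    // advance c0_, as in scanner.cc
      }
      literal.Complete();             // TerminateLiteral(); without this the
      return true;                    // destructor would call DropLiteral()
    }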
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 8ea5f1e..6ea92ce 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -38,483 +38,297 @@
 namespace internal {
 
 
-static int CompareLocal(Variable* const* v, Variable* const* w) {
-  int x = (*v)->index();
-  int y = (*w)->index();
-  // Consider sorting them according to type as well?
-  return x - y;
-}
+Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
+  // Collect stack and context locals.
+  ZoneList<Variable*> stack_locals(scope->StackLocalCount());
+  ZoneList<Variable*> context_locals(scope->ContextLocalCount());
+  scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
+  const int stack_local_count = stack_locals.length();
+  const int context_local_count = context_locals.length();
+  // Make sure we allocate the correct amount.
+  ASSERT(scope->StackLocalCount() == stack_local_count);
+  ASSERT(scope->ContextLocalCount() == context_local_count);
 
+  // Determine use and location of the function variable if it is present.
+  FunctionVariableInfo function_name_info;
+  VariableMode function_variable_mode;
+  if (scope->is_function_scope() && scope->function() != NULL) {
+    Variable* var = scope->function()->var();
+    if (!var->is_used()) {
+      function_name_info = UNUSED;
+    } else if (var->IsContextSlot()) {
+      function_name_info = CONTEXT;
+    } else {
+      ASSERT(var->IsStackLocal());
+      function_name_info = STACK;
+    }
+    function_variable_mode = var->mode();
+  } else {
+    function_name_info = NONE;
+    function_variable_mode = VAR;
+  }
 
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
-    : function_name_(FACTORY->empty_symbol()),
-      calls_eval_(scope->calls_eval()),
-      is_strict_mode_(scope->is_strict_mode()),
-      type_(scope->type()),
-      parameters_(scope->num_parameters()),
-      stack_slots_(scope->num_stack_slots()),
-      context_slots_(scope->num_heap_slots()),
-      context_modes_(scope->num_heap_slots()) {
+  const bool has_function_name = function_name_info != NONE;
+  const int parameter_count = scope->num_parameters();
+  const int length = kVariablePartIndex
+      + parameter_count + stack_local_count + 2 * context_local_count
+      + (has_function_name ? 2 : 0);
+
+  Handle<ScopeInfo> scope_info = FACTORY->NewScopeInfo(length);
+
+  // Encode the flags.
+  int flags = TypeField::encode(scope->type()) |
+      CallsEvalField::encode(scope->calls_eval()) |
+      StrictModeField::encode(scope->is_strict_mode()) |
+      FunctionVariableField::encode(function_name_info) |
+      FunctionVariableMode::encode(function_variable_mode);
+  scope_info->SetFlags(flags);
+  scope_info->SetParameterCount(parameter_count);
+  scope_info->SetStackLocalCount(stack_local_count);
+  scope_info->SetContextLocalCount(context_local_count);
+
+  int index = kVariablePartIndex;
   // Add parameters.
-  for (int i = 0; i < scope->num_parameters(); i++) {
-    ASSERT(parameters_.length() == i);
-    parameters_.Add(scope->parameter(i)->name());
+  ASSERT(index == scope_info->ParameterEntriesIndex());
+  for (int i = 0; i < parameter_count; ++i) {
+    scope_info->set(index++, *scope->parameter(i)->name());
   }
 
-  // Add stack locals and collect heap locals.
-  // We are assuming that the locals' slots are allocated in
-  // increasing order, so we can simply add them to the
-  // ScopeInfo lists. However, due to usage analysis, this is
-  // not true for context-allocated locals: Some of them
-  // may be parameters which are allocated before the
-  // non-parameter locals. When the non-parameter locals are
-  // sorted according to usage, the allocated slot indices may
-  // not be in increasing order with the variable list anymore.
-  // Thus, we first collect the context-allocated locals, and then
-  // sort them by context slot index before adding them to the
-  // ScopeInfo list.
-  List<Variable*, Allocator> locals(32);  // 32 is a wild guess
-  ASSERT(locals.is_empty());
-  scope->CollectUsedVariables(&locals);
-  locals.Sort(&CompareLocal);
-
-  List<Variable*, Allocator> heap_locals(locals.length());
-  for (int i = 0; i < locals.length(); i++) {
-    Variable* var = locals[i];
-    if (var->is_used()) {
-      switch (var->location()) {
-        case Variable::UNALLOCATED:
-        case Variable::PARAMETER:
-          break;
-
-        case Variable::LOCAL:
-          ASSERT(stack_slots_.length() == var->index());
-          stack_slots_.Add(var->name());
-          break;
-
-        case Variable::CONTEXT:
-          heap_locals.Add(var);
-          break;
-
-        case Variable::LOOKUP:
-          // We don't expect lookup variables in the locals list.
-          UNREACHABLE();
-          break;
-      }
-    }
+  // Add stack locals' names. We are assuming that the stack locals'
+  // slots are allocated in increasing order, so we can simply add
+  // them to the ScopeInfo object.
+  ASSERT(index == scope_info->StackLocalEntriesIndex());
+  for (int i = 0; i < stack_local_count; ++i) {
+    ASSERT(stack_locals[i]->index() == i);
+    scope_info->set(index++, *stack_locals[i]->name());
   }
 
-  // Add heap locals.
-  if (scope->num_heap_slots() > 0) {
-    // Add user-defined slots.
-    for (int i = 0; i < heap_locals.length(); i++) {
-      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_slots_.length());
-      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_modes_.length());
-      context_slots_.Add(heap_locals[i]->name());
-      context_modes_.Add(heap_locals[i]->mode());
-    }
+  // Due to usage analysis, context-allocated locals are not necessarily in
+  // increasing order: Some of them may be parameters which are allocated before
+  // the non-parameter locals. When the non-parameter locals are sorted
+  // according to usage, the allocated slot indices may not be in increasing
+  // order with the variable list anymore. Thus, we first need to sort them by
+  // context slot index before adding them to the ScopeInfo object.
+  context_locals.Sort(&Variable::CompareIndex);
 
-  } else {
-    ASSERT(heap_locals.length() == 0);
+  // Add context locals' names.
+  ASSERT(index == scope_info->ContextLocalNameEntriesIndex());
+  for (int i = 0; i < context_local_count; ++i) {
+    scope_info->set(index++, *context_locals[i]->name());
   }
 
-  // Add the function context slot, if present.
-  // For now, this must happen at the very end because of the
-  // ordering of the scope info slots and the respective slot indices.
-  if (scope->is_function_scope()) {
-    VariableProxy* proxy = scope->function();
-    if (proxy != NULL &&
-        proxy->var()->is_used() &&
-        proxy->var()->IsContextSlot()) {
-      function_name_ = proxy->name();
-      // Note that we must not find the function name in the context slot
-      // list - instead it must be handled separately in the
-      // Contexts::Lookup() function. Thus record an empty symbol here so we
-      // get the correct number of context slots.
-      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_slots_.length());
-      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_modes_.length());
-      context_slots_.Add(FACTORY->empty_symbol());
-      context_modes_.Add(proxy->var()->mode());
-    }
+  // Add context locals' info.
+  ASSERT(index == scope_info->ContextLocalInfoEntriesIndex());
+  for (int i = 0; i < context_local_count; ++i) {
+    Variable* var = context_locals[i];
+    uint32_t value = ContextLocalMode::encode(var->mode()) |
+        ContextLocalInitFlag::encode(var->initialization_flag());
+    scope_info->set(index++, Smi::FromInt(value));
   }
-}
 
-
-// Encoding format in a FixedArray object:
-//
-// - function name
-//
-// - calls eval boolean flag
-//
-// - is strict mode scope
-//
-// - scope type
-//
-// - number of variables in the context object (smi) (= function context
-//   slot index + 1)
-// - list of pairs (name, Var mode) of context-allocated variables (starting
-//   with context slot 0)
-//
-// - number of parameters (smi)
-// - list of parameter names (starting with parameter 0 first)
-//
-// - number of variables on the stack (smi)
-// - list of names of stack-allocated variables (starting with stack slot 0)
-
-// The ScopeInfo representation could be simplified and the ScopeInfo
-// re-implemented (with almost the same interface). Here is a
-// suggestion for the new format:
-//
-// - have a single list with all variable names (parameters, stack locals,
-//   context locals), followed by a list of non-Object* values containing
-//   the variables information (what kind, index, attributes)
-// - searching the linear list of names is fast and yields an index into the
-//   list if the variable name is found
-// - that list index is then used to find the variable information in the
-//   subsequent list
-// - the list entries don't have to be in any particular order, so all the
-//   current sorting business can go away
-// - the ScopeInfo lookup routines can be reduced to perhaps a single lookup
-//   which returns all information at once
-// - when gathering the information from a Scope, we only need to iterate
-//   through the local variables (parameters and context info is already
-//   present)
-
-
-template <class T>
-static inline Object** ReadInt(Object** p, T* x) {
-  *x = static_cast<T>((reinterpret_cast<Smi*>(*p++))->value());
-  return p;
-}
-
-
-static inline Object** ReadBool(Object** p, bool* x) {
-  *x = (reinterpret_cast<Smi*>(*p++))->value() != 0;
-  return p;
-}
-
-
-template <class T>
-static inline Object** ReadObject(Object** p, Handle<T>* s) {
-  *s = Handle<T>::cast(Handle<Object>(*p++));
-  return p;
-}
-
-
-template <class Allocator, class T>
-static Object** ReadList(Object** p, List<Handle<T>, Allocator >* list) {
-  ASSERT(list->is_empty());
-  int n;
-  p = ReadInt(p, &n);
-  while (n-- > 0) {
-    Handle<T> s;
-    p = ReadObject(p, &s);
-    list->Add(s);
+  // If present, add the function variable name and its index.
+  ASSERT(index == scope_info->FunctionNameEntryIndex());
+  if (has_function_name) {
+    int var_index = scope->function()->var()->index();
+    scope_info->set(index++, *scope->function()->name());
+    scope_info->set(index++, Smi::FromInt(var_index));
+    ASSERT(function_name_info != STACK ||
+           (var_index == scope_info->StackLocalCount() &&
+            var_index == scope_info->StackSlotCount() - 1));
+    ASSERT(function_name_info != CONTEXT ||
+           var_index == scope_info->ContextLength() - 1);
   }
-  return p;
+
+  ASSERT(index == scope_info->length());
+  ASSERT(scope->num_parameters() == scope_info->ParameterCount());
+  ASSERT(scope->num_stack_slots() == scope_info->StackSlotCount());
+  ASSERT(scope->num_heap_slots() == scope_info->ContextLength());
+  return scope_info;
 }
 
 
-template <class Allocator>
-static Object** ReadList(Object** p,
-                         List<Handle<String>, Allocator>* list,
-                         List<VariableMode, Allocator>* modes) {
-  ASSERT(list->is_empty());
-  int n;
-  p = ReadInt(p, &n);
-  while (n-- > 0) {
-    Handle<String> s;
-    int m;
-    p = ReadObject(p, &s);
-    p = ReadInt(p, &m);
-    list->Add(s);
-    modes->Add(static_cast<VariableMode>(m));
-  }
-  return p;
+ScopeInfo* ScopeInfo::Empty() {
+  return reinterpret_cast<ScopeInfo*>(HEAP->empty_fixed_array());
 }
 
 
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
-  : function_name_(FACTORY->empty_symbol()),
-    parameters_(4),
-    stack_slots_(8),
-    context_slots_(8),
-    context_modes_(8) {
-  if (data->length() > 0) {
-    Object** p0 = data->data_start();
-    Object** p = p0;
-    p = ReadObject(p, &function_name_);
-    p = ReadBool(p, &calls_eval_);
-    p = ReadBool(p, &is_strict_mode_);
-    p = ReadInt(p, &type_);
-    p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
-    p = ReadList<Allocator>(p, &parameters_);
-    p = ReadList<Allocator>(p, &stack_slots_);
-    ASSERT((p - p0) == FixedArray::cast(data)->length());
-  }
-}
-
-
-static inline Object** WriteInt(Object** p, int x) {
-  *p++ = Smi::FromInt(x);
-  return p;
-}
-
-
-static inline Object** WriteBool(Object** p, bool b) {
-  *p++ = Smi::FromInt(b ? 1 : 0);
-  return p;
-}
-
-
-template <class T>
-static inline Object** WriteObject(Object** p, Handle<T> s) {
-  *p++ = *s;
-  return p;
-}
-
-
-template <class Allocator, class T>
-static Object** WriteList(Object** p, List<Handle<T>, Allocator >* list) {
-  const int n = list->length();
-  p = WriteInt(p, n);
-  for (int i = 0; i < n; i++) {
-    p = WriteObject(p, list->at(i));
-  }
-  return p;
-}
-
-
-template <class Allocator>
-static Object** WriteList(Object** p,
-                          List<Handle<String>, Allocator>* list,
-                          List<VariableMode, Allocator>* modes) {
-  const int n = list->length();
-  p = WriteInt(p, n);
-  for (int i = 0; i < n; i++) {
-    p = WriteObject(p, list->at(i));
-    p = WriteInt(p, modes->at(i));
-  }
-  return p;
-}
-
-
-template<class Allocator>
-Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
-  // function name, calls eval, is_strict_mode, scope type,
-  // length for 3 tables:
-  const int extra_slots = 1 + 1 + 1 + 1 + 3;
-  int length = extra_slots +
-               context_slots_.length() * 2 +
-               parameters_.length() +
-               stack_slots_.length();
-
-  Handle<SerializedScopeInfo> data(
-      SerializedScopeInfo::cast(*FACTORY->NewSerializedScopeInfo(length)));
-  AssertNoAllocation nogc;
-
-  Object** p0 = data->data_start();
-  Object** p = p0;
-  p = WriteObject(p, function_name_);
-  p = WriteBool(p, calls_eval_);
-  p = WriteBool(p, is_strict_mode_);
-  p = WriteInt(p, type_);
-  p = WriteList(p, &context_slots_, &context_modes_);
-  p = WriteList(p, &parameters_);
-  p = WriteList(p, &stack_slots_);
-  ASSERT((p - p0) == length);
-
-  return data;
-}
-
-
-template<class Allocator>
-Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
-  // A local variable can be allocated either on the stack or in the context.
-  // For variables allocated in the context they are always preceded by
-  // Context::MIN_CONTEXT_SLOTS of fixed allocated slots in the context.
-  if (i < number_of_stack_slots()) {
-    return stack_slot_name(i);
-  } else {
-    return context_slot_name(i - number_of_stack_slots() +
-                             Context::MIN_CONTEXT_SLOTS);
-  }
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfLocals() const {
-  int number_of_locals = number_of_stack_slots();
-  if (number_of_context_slots() > 0) {
-    ASSERT(number_of_context_slots() >= Context::MIN_CONTEXT_SLOTS);
-    number_of_locals += number_of_context_slots() - Context::MIN_CONTEXT_SLOTS;
-  }
-  return number_of_locals;
-}
-
-
-Handle<SerializedScopeInfo> SerializedScopeInfo::Create(Scope* scope) {
-  ScopeInfo<ZoneListAllocationPolicy> sinfo(scope);
-  return sinfo.Serialize();
-}
-
-
-SerializedScopeInfo* SerializedScopeInfo::Empty() {
-  return reinterpret_cast<SerializedScopeInfo*>(HEAP->empty_fixed_array());
-}
-
-
-Object** SerializedScopeInfo::ContextEntriesAddr() {
+ScopeType ScopeInfo::Type() {
   ASSERT(length() > 0);
-  // +4 for function name, calls eval, strict mode, scope type.
-  return data_start() + 4;
+  return TypeField::decode(Flags());
 }
 
 
-Object** SerializedScopeInfo::ParameterEntriesAddr() {
-  ASSERT(length() > 0);
-  Object** p = ContextEntriesAddr();
-  int number_of_context_slots;
-  p = ReadInt(p, &number_of_context_slots);
-  return p + number_of_context_slots*2;  // *2 for pairs
+bool ScopeInfo::CallsEval() {
+  return length() > 0 && CallsEvalField::decode(Flags());
 }
 
 
-Object** SerializedScopeInfo::StackSlotEntriesAddr() {
-  ASSERT(length() > 0);
-  Object** p = ParameterEntriesAddr();
-  int number_of_parameter_slots;
-  p = ReadInt(p, &number_of_parameter_slots);
-  return p + number_of_parameter_slots;
+bool ScopeInfo::IsStrictMode() {
+  return length() > 0 && StrictModeField::decode(Flags());
 }
 
 
-bool SerializedScopeInfo::CallsEval() {
+int ScopeInfo::LocalCount() {
+  return StackLocalCount() + ContextLocalCount();
+}
+
+
+int ScopeInfo::StackSlotCount() {
   if (length() > 0) {
-    Object** p = data_start() + 1;  // +1 for function name.
-    bool calls_eval;
-    p = ReadBool(p, &calls_eval);
-    return calls_eval;
-  }
-  return false;
-}
-
-
-bool SerializedScopeInfo::IsStrictMode() {
-  if (length() > 0) {
-    Object** p = data_start() + 2;  // +2 for function name, calls eval.
-    bool strict_mode;
-    p = ReadBool(p, &strict_mode);
-    return strict_mode;
-  }
-  return false;
-}
-
-
-ScopeType SerializedScopeInfo::Type() {
-  ASSERT(length() > 0);
-  // +3 for function name, calls eval, strict mode.
-  Object** p = data_start() + 3;
-  ScopeType type;
-  p = ReadInt(p, &type);
-  return type;
-}
-
-
-int SerializedScopeInfo::NumberOfStackSlots() {
-  if (length() > 0) {
-    Object** p = StackSlotEntriesAddr();
-    int number_of_stack_slots;
-    ReadInt(p, &number_of_stack_slots);
-    return number_of_stack_slots;
+    bool function_name_stack_slot =
+        FunctionVariableField::decode(Flags()) == STACK;
+    return StackLocalCount() + (function_name_stack_slot ? 1 : 0);
   }
   return 0;
 }
 
 
-int SerializedScopeInfo::NumberOfContextSlots() {
+int ScopeInfo::ContextLength() {
   if (length() > 0) {
-    Object** p = ContextEntriesAddr();
-    int number_of_context_slots;
-    ReadInt(p, &number_of_context_slots);
-    return number_of_context_slots + Context::MIN_CONTEXT_SLOTS;
+    int context_locals = ContextLocalCount();
+    bool function_name_context_slot =
+        FunctionVariableField::decode(Flags()) == CONTEXT;
+    bool has_context = context_locals > 0 ||
+        function_name_context_slot ||
+        Type() == WITH_SCOPE ||
+        (Type() == FUNCTION_SCOPE && CallsEval());
+    if (has_context) {
+      return Context::MIN_CONTEXT_SLOTS + context_locals +
+          (function_name_context_slot ? 1 : 0);
+    }
   }
   return 0;
 }
 
 
-bool SerializedScopeInfo::HasHeapAllocatedLocals() {
+bool ScopeInfo::HasFunctionName() {
   if (length() > 0) {
-    Object** p = ContextEntriesAddr();
-    int number_of_context_slots;
-    ReadInt(p, &number_of_context_slots);
-    return number_of_context_slots > 0;
+    return NONE != FunctionVariableField::decode(Flags());
+  } else {
+    return false;
   }
-  return false;
 }
 
 
-bool SerializedScopeInfo::HasContext() {
-  return HasHeapAllocatedLocals() ||
-      Type() == WITH_SCOPE;
+bool ScopeInfo::HasHeapAllocatedLocals() {
+  if (length() > 0) {
+    return ContextLocalCount() > 0;
+  } else {
+    return false;
+  }
 }
 
 
-int SerializedScopeInfo::StackSlotIndex(String* name) {
+bool ScopeInfo::HasContext() {
+  if (length() > 0) {
+    return ContextLength() > 0;
+  } else {
+    return false;
+  }
+}
+
+
+String* ScopeInfo::FunctionName() {
+  ASSERT(HasFunctionName());
+  return String::cast(get(FunctionNameEntryIndex()));
+}
+
+
+String* ScopeInfo::ParameterName(int var) {
+  ASSERT(0 <= var && var < ParameterCount());
+  int info_index = ParameterEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+String* ScopeInfo::LocalName(int var) {
+  ASSERT(0 <= var && var < LocalCount());
+  ASSERT(StackLocalEntriesIndex() + StackLocalCount() ==
+         ContextLocalNameEntriesIndex());
+  int info_index = StackLocalEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+String* ScopeInfo::StackLocalName(int var) {
+  ASSERT(0 <= var && var < StackLocalCount());
+  int info_index = StackLocalEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+String* ScopeInfo::ContextLocalName(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalNameEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+VariableMode ScopeInfo::ContextLocalMode(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalInfoEntriesIndex() + var;
+  int value = Smi::cast(get(info_index))->value();
+  return ContextLocalMode::decode(value);
+}
+
+
+InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalInfoEntriesIndex() + var;
+  int value = Smi::cast(get(info_index))->value();
+  return ContextLocalInitFlag::decode(value);
+}
+
+
+int ScopeInfo::StackSlotIndex(String* name) {
   ASSERT(name->IsSymbol());
   if (length() > 0) {
-    // Slots start after length entry.
-    Object** p0 = StackSlotEntriesAddr();
-    int number_of_stack_slots;
-    p0 = ReadInt(p0, &number_of_stack_slots);
-    Object** p = p0;
-    Object** end = p0 + number_of_stack_slots;
-    while (p != end) {
-      if (*p == name) return static_cast<int>(p - p0);
-      p++;
+    int start = StackLocalEntriesIndex();
+    int end = StackLocalEntriesIndex() + StackLocalCount();
+    for (int i = start; i < end; ++i) {
+      if (name == get(i)) {
+        return i - start;
+      }
     }
   }
   return -1;
 }
 
-int SerializedScopeInfo::ContextSlotIndex(String* name, VariableMode* mode) {
+
+int ScopeInfo::ContextSlotIndex(String* name,
+                                VariableMode* mode,
+                                InitializationFlag* init_flag) {
   ASSERT(name->IsSymbol());
-  Isolate* isolate = GetIsolate();
-  int result = isolate->context_slot_cache()->Lookup(this, name, mode);
-  if (result != ContextSlotCache::kNotFound) return result;
+  ASSERT(mode != NULL);
+  ASSERT(init_flag != NULL);
   if (length() > 0) {
-    // Slots start after length entry.
-    Object** p0 = ContextEntriesAddr();
-    int number_of_context_slots;
-    p0 = ReadInt(p0, &number_of_context_slots);
-    Object** p = p0;
-    Object** end = p0 + number_of_context_slots * 2;
-    while (p != end) {
-      if (*p == name) {
-        ASSERT(((p - p0) & 1) == 0);
-        int v;
-        ReadInt(p + 1, &v);
-        VariableMode mode_value = static_cast<VariableMode>(v);
-        if (mode != NULL) *mode = mode_value;
-        result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
-        isolate->context_slot_cache()->Update(this, name, mode_value, result);
+    ContextSlotCache* context_slot_cache = GetIsolate()->context_slot_cache();
+    int result = context_slot_cache->Lookup(this, name, mode, init_flag);
+    if (result != ContextSlotCache::kNotFound) {
+      ASSERT(result < ContextLength());
+      return result;
+    }
+
+    int start = ContextLocalNameEntriesIndex();
+    int end = ContextLocalNameEntriesIndex() + ContextLocalCount();
+    for (int i = start; i < end; ++i) {
+      if (name == get(i)) {
+        int var = i - start;
+        *mode = ContextLocalMode(var);
+        *init_flag = ContextLocalInitFlag(var);
+        result = Context::MIN_CONTEXT_SLOTS + var;
+        context_slot_cache->Update(this, name, *mode, *init_flag, result);
+        ASSERT(result < ContextLength());
         return result;
       }
-      p += 2;
     }
+    context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
   }
-  isolate->context_slot_cache()->Update(this, name, INTERNAL, -1);
   return -1;
 }
 
 
-int SerializedScopeInfo::ParameterIndex(String* name) {
+int ScopeInfo::ParameterIndex(String* name) {
   ASSERT(name->IsSymbol());
   if (length() > 0) {
     // We must read parameters from the end since for
@@ -522,49 +336,58 @@
     // last declaration of that parameter is used
     // inside a function (and thus we need to look
     // at the last index). Was bug# 1110337.
-    //
-    // Eventually, we should only register such parameters
-    // once, with corresponding index. This requires a new
-    // implementation of the ScopeInfo code. See also other
-    // comments in this file regarding this.
-    Object** p = ParameterEntriesAddr();
-    int number_of_parameter_slots;
-    Object** p0 = ReadInt(p, &number_of_parameter_slots);
-    p = p0 + number_of_parameter_slots;
-    while (p > p0) {
-      p--;
-      if (*p == name) return static_cast<int>(p - p0);
+    int start = ParameterEntriesIndex();
+    int end = ParameterEntriesIndex() + ParameterCount();
+    for (int i = end - 1; i >= start; --i) {
+      if (name == get(i)) {
+        return i - start;
+      }
     }
   }
   return -1;
 }
 
 
-int SerializedScopeInfo::FunctionContextSlotIndex(String* name,
-                                                  VariableMode* mode) {
+int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
   ASSERT(name->IsSymbol());
+  ASSERT(mode != NULL);
   if (length() > 0) {
-    Object** p = data_start();
-    if (*p == name) {
-      p = ContextEntriesAddr();
-      int number_of_context_slots;
-      p = ReadInt(p, &number_of_context_slots);
-      ASSERT(number_of_context_slots != 0);
-      // The function context slot is the last entry.
-      if (mode != NULL) {
-        // Seek to context slot entry.
-        p += (number_of_context_slots - 1) * 2;
-        // Seek to mode.
-        ++p;
-        ReadInt(p, mode);
-      }
-      return number_of_context_slots + Context::MIN_CONTEXT_SLOTS - 1;
+    if (FunctionVariableField::decode(Flags()) == CONTEXT &&
+        FunctionName() == name) {
+      *mode = FunctionVariableMode::decode(Flags());
+      return Smi::cast(get(FunctionNameEntryIndex() + 1))->value();
     }
   }
   return -1;
 }
 
 
+int ScopeInfo::ParameterEntriesIndex() {
+  ASSERT(length() > 0);
+  return kVariablePartIndex;
+}
+
+
+int ScopeInfo::StackLocalEntriesIndex() {
+  return ParameterEntriesIndex() + ParameterCount();
+}
+
+
+int ScopeInfo::ContextLocalNameEntriesIndex() {
+  return StackLocalEntriesIndex() + StackLocalCount();
+}
+
+
+int ScopeInfo::ContextLocalInfoEntriesIndex() {
+  return ContextLocalNameEntriesIndex() + ContextLocalCount();
+}
+
+
+int ScopeInfo::FunctionNameEntryIndex() {
+  return ContextLocalInfoEntriesIndex() + ContextLocalCount();
+}
+
+
 int ContextSlotCache::Hash(Object* data, String* name) {
   // Uses only lower 32 bits if pointers are larger.
   uintptr_t addr_hash =
@@ -575,12 +398,14 @@
 
 int ContextSlotCache::Lookup(Object* data,
                              String* name,
-                             VariableMode* mode) {
+                             VariableMode* mode,
+                             InitializationFlag* init_flag) {
   int index = Hash(data, name);
   Key& key = keys_[index];
   if ((key.data == data) && key.name->Equals(name)) {
     Value result(values_[index]);
     if (mode != NULL) *mode = result.mode();
+    if (init_flag != NULL) *init_flag = result.initialization_flag();
     return result.index() + kNotFound;
   }
   return kNotFound;
@@ -590,6 +415,7 @@
 void ContextSlotCache::Update(Object* data,
                               String* name,
                               VariableMode mode,
+                              InitializationFlag init_flag,
                               int slot_index) {
   String* symbol;
   ASSERT(slot_index > kNotFound);
@@ -599,9 +425,9 @@
     key.data = data;
     key.name = symbol;
     // Please note value only takes a uint as index.
-    values_[index] = Value(mode, slot_index - kNotFound).raw();
+    values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
 #ifdef DEBUG
-    ValidateEntry(data, name, mode, slot_index);
+    ValidateEntry(data, name, mode, init_flag, slot_index);
 #endif
   }
 }
@@ -617,6 +443,7 @@
 void ContextSlotCache::ValidateEntry(Object* data,
                                      String* name,
                                      VariableMode mode,
+                                     InitializationFlag init_flag,
                                      int slot_index) {
   String* symbol;
   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
@@ -626,51 +453,56 @@
     ASSERT(key.name->Equals(name));
     Value result(values_[index]);
     ASSERT(result.mode() == mode);
+    ASSERT(result.initialization_flag() == init_flag);
     ASSERT(result.index() + kNotFound == slot_index);
   }
 }
 
 
-template <class Allocator>
 static void PrintList(const char* list_name,
                       int nof_internal_slots,
-                      List<Handle<String>, Allocator>& list) {
-  if (list.length() > 0) {
+                      int start,
+                      int end,
+                      ScopeInfo* scope_info) {
+  if (start < end) {
     PrintF("\n  // %s\n", list_name);
     if (nof_internal_slots > 0) {
       PrintF("  %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
     }
-    for (int i = 0; i < list.length(); i++) {
-      PrintF("  %2d ", i + nof_internal_slots);
-      list[i]->ShortPrint();
+    for (int i = nof_internal_slots; start < end; ++i, ++start) {
+      PrintF("  %2d ", i);
+      String::cast(scope_info->get(start))->ShortPrint();
       PrintF("\n");
     }
   }
 }
 
 
-template<class Allocator>
-void ScopeInfo<Allocator>::Print() {
+void ScopeInfo::Print() {
   PrintF("ScopeInfo ");
-  if (function_name_->length() > 0)
-    function_name_->ShortPrint();
-  else
+  if (HasFunctionName()) {
+    FunctionName()->ShortPrint();
+  } else {
     PrintF("/* no function name */");
+  }
   PrintF("{");
 
-  PrintList<Allocator>("parameters", 0, parameters_);
-  PrintList<Allocator>("stack slots", 0, stack_slots_);
-  PrintList<Allocator>("context slots", Context::MIN_CONTEXT_SLOTS,
-                       context_slots_);
+  PrintList("parameters", 0,
+            ParameterEntriesIndex(),
+            ParameterEntriesIndex() + ParameterCount(),
+            this);
+  PrintList("stack slots", 0,
+            StackLocalEntriesIndex(),
+            StackLocalEntriesIndex() + StackLocalCount(),
+            this);
+  PrintList("context slots",
+            Context::MIN_CONTEXT_SLOTS,
+            ContextLocalNameEntriesIndex(),
+            ContextLocalNameEntriesIndex() + ContextLocalCount(),
+            this);
 
   PrintF("}\n");
 }
 #endif  // DEBUG
 
-
-// Make sure the classes get instantiated by the template system.
-template class ScopeInfo<FreeStoreAllocationPolicy>;
-template class ScopeInfo<PreallocatedStorage>;
-template class ScopeInfo<ZoneListAllocationPolicy>;
-
 } }  // namespace v8::internal
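
The new ScopeInfo is a flat array whose layout follows directly from
Create() and the *EntriesIndex() accessors above: a fixed header (flags,
parameter count, stack local count, context local count) followed by the
variable part. Schematically:

    [0 .. kVariablePartIndex)    Flags, ParameterCount, StackLocalCount,
                                 ContextLocalCount
    ParameterCount()  entries    parameter name symbols
    StackLocalCount() entries    stack local name symbols
    ContextLocalCount() entries  context local name symbols
    ContextLocalCount() entries  context local info Smis (mode + init flag)
    2 entries (if named)         function name symbol, slot index Smi

As a worked example, a scope with 2 parameters, 1 stack local, 3 context
locals and a context-allocated function name gets
length = kVariablePartIndex + 2 + 1 + 2 * 3 + 2, matching the length
computation at the top of ScopeInfo::Create().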
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index eeb3047..93734f5 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -35,67 +35,6 @@
 namespace v8 {
 namespace internal {
 
-// ScopeInfo represents information about different scopes of a source
-// program  and the allocation of the scope's variables. Scope information
-// is stored in a compressed form in SerializedScopeInfo objects and is used
-// at runtime (stack dumps, deoptimization, etc.).
-
-// Forward defined as
-// template <class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-template<class Allocator>
-class ScopeInfo BASE_EMBEDDED {
- public:
-  // Create a ScopeInfo instance from a scope.
-  explicit ScopeInfo(Scope* scope);
-
-  // Create a ScopeInfo instance from SerializedScopeInfo.
-  explicit ScopeInfo(SerializedScopeInfo* data);
-
-  // Creates a SerializedScopeInfo holding the serialized scope info.
-  Handle<SerializedScopeInfo> Serialize();
-
-  // --------------------------------------------------------------------------
-  // Lookup
-
-  Handle<String> function_name() const { return function_name_; }
-
-  Handle<String> parameter_name(int i) const { return parameters_[i]; }
-  int number_of_parameters() const { return parameters_.length(); }
-
-  Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
-  int number_of_stack_slots() const { return stack_slots_.length(); }
-
-  Handle<String> context_slot_name(int i) const {
-    return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
-  }
-  int number_of_context_slots() const {
-    int l = context_slots_.length();
-    return l == 0 ? 0 : l + Context::MIN_CONTEXT_SLOTS;
-  }
-
-  Handle<String> LocalName(int i) const;
-  int NumberOfLocals() const;
-
-  ScopeType type() const { return type_; }
-  // --------------------------------------------------------------------------
-  // Debugging support
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  Handle<String> function_name_;
-  bool calls_eval_;
-  bool is_strict_mode_;
-  ScopeType type_;
-  List<Handle<String>, Allocator > parameters_;
-  List<Handle<String>, Allocator > stack_slots_;
-  List<Handle<String>, Allocator > context_slots_;
-  List<VariableMode, Allocator > context_modes_;
-};
-
-
 // Cache for mapping (data, property name) into context slot index.
 // The cache contains both positive and negative results.
 // Slot index equals -1 means the property is absent.
@@ -106,12 +45,14 @@
   // If absent, kNotFound is returned.
   int Lookup(Object* data,
              String* name,
-             VariableMode* mode);
+             VariableMode* mode,
+             InitializationFlag* init_flag);
 
   // Update an element in the cache.
   void Update(Object* data,
               String* name,
               VariableMode mode,
+              InitializationFlag init_flag,
               int slot_index);
 
   // Clear the cache.
@@ -134,6 +75,7 @@
   void ValidateEntry(Object* data,
                      String* name,
                      VariableMode mode,
+                     InitializationFlag init_flag,
                      int slot_index);
 #endif
 
@@ -144,11 +86,17 @@
   };
 
   struct Value {
-    Value(VariableMode mode, int index) {
+    Value(VariableMode mode,
+          InitializationFlag init_flag,
+          int index) {
       ASSERT(ModeField::is_valid(mode));
+      ASSERT(InitField::is_valid(init_flag));
       ASSERT(IndexField::is_valid(index));
-      value_ = ModeField::encode(mode) | IndexField::encode(index);
+      value_ = ModeField::encode(mode) |
+          IndexField::encode(index) |
+          InitField::encode(init_flag);
       ASSERT(mode == this->mode());
+      ASSERT(init_flag == this->initialization_flag());
       ASSERT(index == this->index());
     }
 
@@ -158,12 +106,18 @@
 
     VariableMode mode() { return ModeField::decode(value_); }
 
+    InitializationFlag initialization_flag() {
+      return InitField::decode(value_);
+    }
+
     int index() { return IndexField::decode(value_); }
 
     // Bit fields in value_ (type, shift, size). Must be public so the
     // constants can be embedded in generated code.
-    class ModeField:  public BitField<VariableMode, 0, 3> {};
-    class IndexField: public BitField<int,          3, 32-3> {};
+    class ModeField:  public BitField<VariableMode,       0, 3> {};
+    class InitField:  public BitField<InitializationFlag, 3, 1> {};
+    class IndexField: public BitField<int,                4, 32-4> {};
+
    private:
     uint32_t value_;
   };
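
The widened Value packing can be checked by hand: ModeField occupies bits
0-2, InitField bit 3, and IndexField bits 4-31 (the index field shrank
from 29 to 28 bits to make room for the init flag). For example:

    // Encoding an entry with index 5; IndexField::encode(5) is 5 << 4.
    uint32_t value = ModeField::encode(LET) |
                     InitField::encode(kNeedsInitialization) |
                     IndexField::encode(5);
    ASSERT(IndexField::decode(value) == 5);  // decode masks and shifts back

Each accessor on Value simply applies the corresponding decode().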
diff --git a/src/scopes.cc b/src/scopes.cc
index 3167c4d..f9cebcd 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -76,23 +76,27 @@
 }
 
 
-// Dummy constructor
-VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {}
-
 VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
 VariableMap::~VariableMap() {}
 
 
-Variable* VariableMap::Declare(Scope* scope,
-                               Handle<String> name,
-                               VariableMode mode,
-                               bool is_valid_lhs,
-                               Variable::Kind kind) {
+Variable* VariableMap::Declare(
+    Scope* scope,
+    Handle<String> name,
+    VariableMode mode,
+    bool is_valid_lhs,
+    Variable::Kind kind,
+    InitializationFlag initialization_flag) {
   HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
   if (p->value == NULL) {
     // The variable has not been declared yet -> insert it.
     ASSERT(p->key == name.location());
-    p->value = new Variable(scope, name, mode, is_valid_lhs, kind);
+    p->value = new Variable(scope,
+                            name,
+                            mode,
+                            is_valid_lhs,
+                            kind,
+                            initialization_flag);
   }
   return reinterpret_cast<Variable*>(p->value);
 }
@@ -112,21 +116,6 @@
 // ----------------------------------------------------------------------------
 // Implementation of Scope
 
-
-// Dummy constructor
-Scope::Scope(ScopeType type)
-    : isolate_(Isolate::Current()),
-      inner_scopes_(0),
-      variables_(false),
-      temps_(0),
-      params_(0),
-      unresolved_(0),
-      decls_(0),
-      already_resolved_(false) {
-  SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
-}
-
-
 Scope::Scope(Scope* outer_scope, ScopeType type)
     : isolate_(Isolate::Current()),
       inner_scopes_(4),
@@ -136,7 +125,7 @@
       unresolved_(16),
       decls_(4),
       already_resolved_(false) {
-  SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
+  SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
   // At some point we might want to provide outer scopes to
   // eval scopes (by walking the stack and reading the scope info).
   // In that case, the ASSERT below needs to be adjusted.
@@ -147,7 +136,7 @@
 
 Scope::Scope(Scope* inner_scope,
              ScopeType type,
-             Handle<SerializedScopeInfo> scope_info)
+             Handle<ScopeInfo> scope_info)
     : isolate_(Isolate::Current()),
       inner_scopes_(4),
       variables_(),
@@ -157,8 +146,8 @@
       decls_(4),
       already_resolved_(true) {
   SetDefaults(type, NULL, scope_info);
-  if (!scope_info.is_null() && scope_info->HasHeapAllocatedLocals()) {
-    num_heap_slots_ = scope_info_->NumberOfContextSlots();
+  if (!scope_info.is_null()) {
+    num_heap_slots_ = scope_info_->ContextLength();
   }
   AddInnerScope(inner_scope);
 }
@@ -173,21 +162,22 @@
       unresolved_(0),
       decls_(0),
       already_resolved_(true) {
-  SetDefaults(CATCH_SCOPE, NULL, Handle<SerializedScopeInfo>::null());
+  SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
   AddInnerScope(inner_scope);
   ++num_var_or_const_;
   Variable* variable = variables_.Declare(this,
                                           catch_variable_name,
                                           VAR,
                                           true,  // Valid left-hand side.
-                                          Variable::NORMAL);
+                                          Variable::NORMAL,
+                                          kCreatedInitialized);
   AllocateHeapSlot(variable);
 }
 
 
 void Scope::SetDefaults(ScopeType type,
                         Scope* outer_scope,
-                        Handle<SerializedScopeInfo> scope_info) {
+                        Handle<ScopeInfo> scope_info) {
   outer_scope_ = outer_scope;
   type_ = type;
   scope_name_ = isolate_->factory()->empty_symbol();
@@ -224,8 +214,9 @@
   bool contains_with = false;
   while (!context->IsGlobalContext()) {
     if (context->IsWithContext()) {
-      Scope* with_scope = new Scope(current_scope, WITH_SCOPE,
-                                    Handle<SerializedScopeInfo>::null());
+      Scope* with_scope = new Scope(current_scope,
+                                    WITH_SCOPE,
+                                    Handle<ScopeInfo>::null());
       current_scope = with_scope;
       // All the inner scopes are inside a with.
       contains_with = true;
@@ -233,15 +224,15 @@
         s->scope_inside_with_ = true;
       }
     } else if (context->IsFunctionContext()) {
-      SerializedScopeInfo* scope_info =
-          context->closure()->shared()->scope_info();
-      current_scope = new Scope(current_scope, FUNCTION_SCOPE,
-                                Handle<SerializedScopeInfo>(scope_info));
+      ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+      current_scope = new Scope(current_scope,
+                                FUNCTION_SCOPE,
+                                Handle<ScopeInfo>(scope_info));
     } else if (context->IsBlockContext()) {
-      SerializedScopeInfo* scope_info =
-          SerializedScopeInfo::cast(context->extension());
-      current_scope = new Scope(current_scope, BLOCK_SCOPE,
-                                Handle<SerializedScopeInfo>(scope_info));
+      ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+      current_scope = new Scope(current_scope,
+                                BLOCK_SCOPE,
+                                Handle<ScopeInfo>(scope_info));
     } else {
       ASSERT(context->IsCatchContext());
       String* name = String::cast(context->extension());
@@ -307,7 +298,8 @@
                            isolate_->factory()->this_symbol(),
                            VAR,
                            false,
-                           Variable::THIS);
+                           Variable::THIS,
+                           kCreatedInitialized);
     var->AllocateTo(Variable::PARAMETER, -1);
     receiver_ = var;
   } else {
@@ -323,7 +315,8 @@
                        isolate_->factory()->arguments_symbol(),
                        VAR,
                        true,
-                       Variable::ARGUMENTS);
+                       Variable::ARGUMENTS,
+                       kCreatedInitialized);
   }
 }
 
@@ -372,20 +365,27 @@
 
   // Check context slot lookup.
   VariableMode mode;
-  int index = scope_info_->ContextSlotIndex(*name, &mode);
+  InitializationFlag init_flag;
+  int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag);
   if (index < 0) {
     // Check parameters.
     mode = VAR;
+    init_flag = kCreatedInitialized;
     index = scope_info_->ParameterIndex(*name);
     if (index < 0) {
       // Check the function name.
-      index = scope_info_->FunctionContextSlotIndex(*name, NULL);
+      index = scope_info_->FunctionContextSlotIndex(*name, &mode);
       if (index < 0) return NULL;
     }
   }
 
   Variable* var =
-      variables_.Declare(this, name, mode, true, Variable::NORMAL);
+      variables_.Declare(this,
+                         name,
+                         mode,
+                         true,
+                         Variable::NORMAL,
+                         init_flag);
   var->AllocateTo(Variable::CONTEXT, index);
   return var;
 }
@@ -404,8 +404,8 @@
 
 Variable* Scope::DeclareFunctionVar(Handle<String> name, VariableMode mode) {
   ASSERT(is_function_scope() && function_ == NULL);
-  Variable* function_var =
-      new Variable(this, name, mode, true, Variable::NORMAL);
+  Variable* function_var = new Variable(
+      this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
   function_ = new(isolate_->zone()) VariableProxy(isolate_, function_var);
   return function_var;
 }
@@ -414,13 +414,15 @@
 void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
   ASSERT(!already_resolved());
   ASSERT(is_function_scope());
-  Variable* var =
-      variables_.Declare(this, name, mode, true, Variable::NORMAL);
+  Variable* var = variables_.Declare(
+      this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
   params_.Add(var);
 }
 
 
-Variable* Scope::DeclareLocal(Handle<String> name, VariableMode mode) {
+Variable* Scope::DeclareLocal(Handle<String> name,
+                              VariableMode mode,
+                              InitializationFlag init_flag) {
   ASSERT(!already_resolved());
   // This function handles VAR and CONST modes.  DYNAMIC variables are
   // introduced during variable allocation, INTERNAL variables are allocated
@@ -430,15 +432,19 @@
          mode == CONST_HARMONY ||
          mode == LET);
   ++num_var_or_const_;
-  return variables_.Declare(this, name, mode, true, Variable::NORMAL);
+  return
+      variables_.Declare(this, name, mode, true, Variable::NORMAL, init_flag);
 }
 
 
 Variable* Scope::DeclareGlobal(Handle<String> name) {
   ASSERT(is_global_scope());
-  return variables_.Declare(this, name, DYNAMIC_GLOBAL,
+  return variables_.Declare(this,
+                            name,
+                            DYNAMIC_GLOBAL,
                             true,
-                            Variable::NORMAL);
+                            Variable::NORMAL,
+                            kCreatedInitialized);
 }
 
 
@@ -472,7 +478,8 @@
                                name,
                                TEMPORARY,
                                true,
-                               Variable::NORMAL);
+                               Variable::NORMAL,
+                               kCreatedInitialized);
   temps_.Add(var);
   return var;
 }
@@ -522,38 +529,36 @@
 }
 
 
-template<class Allocator>
-void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
-  // Collect variables in this scope.
-  // Note that the function_ variable - if present - is not
-  // collected here but handled separately in ScopeInfo
-  // which is the current user of this function).
+void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+                                         ZoneList<Variable*>* context_locals) {
+  ASSERT(stack_locals != NULL);
+  ASSERT(context_locals != NULL);
+
+  // Collect temporaries which are always allocated on the stack.
   for (int i = 0; i < temps_.length(); i++) {
     Variable* var = temps_[i];
     if (var->is_used()) {
-      locals->Add(var);
+      ASSERT(var->IsStackLocal());
+      stack_locals->Add(var);
     }
   }
+
+  // Collect declared local variables.
   for (VariableMap::Entry* p = variables_.Start();
        p != NULL;
        p = variables_.Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
     if (var->is_used()) {
-      locals->Add(var);
+      if (var->IsStackLocal()) {
+        stack_locals->Add(var);
+      } else if (var->IsContextSlot()) {
+        context_locals->Add(var);
+      }
     }
   }
 }
 
 
-// Make sure the method gets instantiated by the template system.
-template void Scope::CollectUsedVariables(
-    List<Variable*, FreeStoreAllocationPolicy>* locals);
-template void Scope::CollectUsedVariables(
-    List<Variable*, PreallocatedStorage>* locals);
-template void Scope::CollectUsedVariables(
-    List<Variable*, ZoneListAllocationPolicy>* locals);
-
-
 void Scope::AllocateVariables(Handle<Context> context) {
   ASSERT(outer_scope_ == NULL);  // eval or global scopes only
 
@@ -628,18 +633,18 @@
 }
 
 
-Handle<SerializedScopeInfo> Scope::GetSerializedScopeInfo() {
+Handle<ScopeInfo> Scope::GetScopeInfo() {
   if (scope_info_.is_null()) {
-    scope_info_ = SerializedScopeInfo::Create(this);
+    scope_info_ = ScopeInfo::Create(this);
   }
   return scope_info_;
 }
 
 
 void Scope::GetNestedScopeChain(
-    List<Handle<SerializedScopeInfo> >* chain,
+    List<Handle<ScopeInfo> >* chain,
     int position) {
-  chain->Add(Handle<SerializedScopeInfo>(GetSerializedScopeInfo()));
+  chain->Add(Handle<ScopeInfo>(GetScopeInfo()));
 
   for (int i = 0; i < inner_scopes_.length(); i++) {
     Scope* scope = inner_scopes_[i];
@@ -707,9 +712,9 @@
     PrintName(var->name());
     PrintF(";  // ");
     PrintLocation(var);
-    if (var->is_accessed_from_inner_scope()) {
+    if (var->has_forced_context_allocation()) {
       if (!var->IsUnallocated()) PrintF(", ");
-      PrintF("inner scope access");
+      PrintF("forced context allocation");
     }
     PrintF("\n");
   }
@@ -811,7 +816,14 @@
   Variable* var = map->Lookup(name);
   if (var == NULL) {
     // Declare a new non-local.
-    var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
+    InitializationFlag init_flag = (mode == VAR)
+        ? kCreatedInitialized : kNeedsInitialization;
+    var = map->Declare(NULL,
+                       name,
+                       mode,
+                       true,
+                       Variable::NORMAL,
+                       init_flag);
     // Allocate it by giving it a dynamic lookup.
     var->AllocateTo(Variable::LOOKUP, -1);
   }
@@ -848,7 +860,9 @@
     *binding_kind = BOUND;
   } else if (outer_scope_ != NULL) {
     var = outer_scope_->LookupRecursive(name, context, binding_kind);
-    if (*binding_kind == BOUND) var->MarkAsAccessedFromInnerScope();
+    if (*binding_kind == BOUND && (is_function_scope() || is_with_scope())) {
+      var->ForceContextAllocation();
+    }
   }
 
   if (is_with_scope()) {
@@ -980,7 +994,7 @@
   // via an eval() call.  This is only possible if the variable has a
   // visible name.
   if ((var->is_this() || var->name()->length() > 0) &&
-      (var->is_accessed_from_inner_scope() ||
+      (var->has_forced_context_allocation() ||
        scope_calls_eval_ ||
        inner_scope_calls_eval_ ||
        scope_contains_with_ ||
@@ -1003,7 +1017,7 @@
   // catch-bound variables are always allocated in a context.
   if (var->mode() == TEMPORARY) return false;
   if (is_catch_scope() || is_block_scope()) return true;
-  return var->is_accessed_from_inner_scope() ||
+  return var->has_forced_context_allocation() ||
       scope_calls_eval_ ||
       inner_scope_calls_eval_ ||
       scope_contains_with_ ||
@@ -1067,9 +1081,8 @@
     Variable* var = params_[i];
     ASSERT(var->scope() == this);
     if (uses_nonstrict_arguments) {
-      // Give the parameter a use from an inner scope, to force allocation
-      // to the context.
-      var->MarkAsAccessedFromInnerScope();
+      // Force context allocation of the parameter.
+      var->ForceContextAllocation();
     }
 
     if (MustAllocate(var)) {
@@ -1144,21 +1157,15 @@
   if (is_function_scope()) AllocateParameterLocals();
   AllocateNonParameterLocals();
 
-  // Allocate context if necessary.
-  bool must_have_local_context = false;
-  if (scope_calls_eval_ || scope_contains_with_) {
-    // The context for the eval() call or 'with' statement in this scope.
-    // Unless we are in the global or an eval scope, we need a local
-    // context even if we didn't statically allocate any locals in it,
-    // and the compiler will access the context variable. If we are
-    // not in an inner scope, the scope is provided from the outside.
-    must_have_local_context = is_function_scope();
-  }
+  // Force allocation of a context for this scope if necessary. For a 'with'
+  // scope and for a function scope that makes an 'eval' call we need a context,
+  // even if no local variables were statically allocated in the scope.
+  bool must_have_context = is_with_scope() ||
+      (is_function_scope() && calls_eval());
 
   // If we didn't allocate any locals in the local context, then we only
-  // need the minimal number of slots if we must have a local context.
-  if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS &&
-      !must_have_local_context) {
+  // need the minimal number of slots if we must have a context.
+  if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS && !must_have_context) {
     num_heap_slots_ = 0;
   }
 
@@ -1166,4 +1173,17 @@
   ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
 }
 
+
+int Scope::StackLocalCount() const {
+  return num_stack_slots() -
+      (function_ != NULL && function_->var()->IsStackLocal() ? 1 : 0);
+}
+
+
+int Scope::ContextLocalCount() const {
+  if (num_heap_slots() == 0) return 0;
+  return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
+      (function_ != NULL && function_->var()->IsContextSlot() ? 1 : 0);
+}
+
 } }  // namespace v8::internal
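
Note on the change above: CollectStackAndContextLocals partitions a scope's
used variables by allocation kind instead of dumping them into one list. As a
rough JavaScript illustration of the distinction (a sketch of the usual
closure-capture heuristic, not a guarantee of exact allocation decisions):

  function outer() {
    var local = 1;     // used only here: eligible for a stack slot
    var captured = 2;  // referenced by the closure: needs a context slot
    function inner() { return captured; }
    return inner;
  }
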
diff --git a/src/scopes.h b/src/scopes.h
index a141887..d08e294 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -42,17 +42,14 @@
  public:
   VariableMap();
 
-  // Dummy constructor.  This constructor doesn't set up the map
-  // properly so don't use it unless you have a good reason.
-  explicit VariableMap(bool gotta_love_static_overloading);
-
   virtual ~VariableMap();
 
   Variable* Declare(Scope* scope,
                     Handle<String> name,
                     VariableMode mode,
                     bool is_valid_lhs,
-                    Variable::Kind kind);
+                    Variable::Kind kind,
+                    InitializationFlag initialization_flag);
 
   Variable* Lookup(Handle<String> name);
 };
@@ -131,7 +128,9 @@
 
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
-  Variable* DeclareLocal(Handle<String> name, VariableMode mode);
+  Variable* DeclareLocal(Handle<String> name,
+                         VariableMode mode,
+                         InitializationFlag init_flag);
 
   // Declare an implicit global variable in this scope which must be a
   // global scope.  The variable was introduced (possibly from an inner
@@ -304,9 +303,11 @@
   // ---------------------------------------------------------------------------
   // Variable allocation.
 
-  // Collect all used locals in this scope.
-  template<class Allocator>
-  void CollectUsedVariables(List<Variable*, Allocator>* locals);
+  // Collect stack and context allocated local variables in this scope. Note
+  // that the function variable - if present - is not collected and should be
+  // handled separately.
+  void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+                                    ZoneList<Variable*>* context_locals);
 
   // Resolve and fill in the allocation information for all variables
   // in this scope. Must be called *after* all scopes have been
@@ -325,6 +326,9 @@
   int num_stack_slots() const { return num_stack_slots_; }
   int num_heap_slots() const { return num_heap_slots_; }
 
+  int StackLocalCount() const;
+  int ContextLocalCount() const;
+
   // Make sure this scope and all outer scopes are eagerly compiled.
   void ForceEagerCompilation()  { force_eager_compilation_ = true; }
 
@@ -341,13 +345,13 @@
   // where var declarations will be hoisted to in the implementation.
   Scope* DeclarationScope();
 
-  Handle<SerializedScopeInfo> GetSerializedScopeInfo();
+  Handle<ScopeInfo> GetScopeInfo();
 
   // Get the chain of nested scopes within this scope for the source statement
   // position. The scopes will be added to the list from the outermost scope to
   // the innermost scope. Only nested block, catch or with scopes are tracked
   // and will be returned, but no inner function scopes.
-  void GetNestedScopeChain(List<Handle<SerializedScopeInfo> >* chain,
+  void GetNestedScopeChain(List<Handle<ScopeInfo> >* chain,
                            int statement_position);
 
   // ---------------------------------------------------------------------------
@@ -373,8 +377,6 @@
  protected:
   friend class ParserFactory;
 
-  explicit Scope(ScopeType type);
-
   Isolate* const isolate_;
 
   // Scope tree.
@@ -444,8 +446,8 @@
   int num_stack_slots_;
   int num_heap_slots_;
 
-  // Serialized scopes support.
-  Handle<SerializedScopeInfo> scope_info_;
+  // Serialized scope info support.
+  Handle<ScopeInfo> scope_info_;
   bool already_resolved() { return already_resolved_; }
 
   // Create a non-local variable with a given name.
@@ -529,9 +531,7 @@
 
  private:
   // Construct a scope based on the scope info.
-  Scope(Scope* inner_scope,
-        ScopeType type,
-        Handle<SerializedScopeInfo> scope_info);
+  Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info);
 
   // Construct a catch scope with a binding for the name.
   Scope(Scope* inner_scope, Handle<String> catch_variable_name);
@@ -545,7 +545,7 @@
 
   void SetDefaults(ScopeType type,
                    Scope* outer_scope,
-                   Handle<SerializedScopeInfo> scope_info);
+                   Handle<ScopeInfo> scope_info);
 };
 
 } }  // namespace v8::internal
diff --git a/src/serialize.h b/src/serialize.h
index 49695ec..b598c6c 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -608,7 +608,7 @@
     ASSERT(!o->IsScript());
     return o->IsString() || o->IsSharedFunctionInfo() ||
            o->IsHeapNumber() || o->IsCode() ||
-           o->IsSerializedScopeInfo() ||
+           o->IsScopeInfo() ||
            o->map() == HEAP->fixed_cow_array_map();
   }
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 139bc2d..8b6e28f 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -161,18 +161,6 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     (set_failure(NULL),
-                      CompileLoadCallback(*name, *object, *holder, *callback)),
-                     Code);
-}
-
-
 Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
                                             Handle<JSObject> receiver,
                                             Handle<JSObject> holder,
@@ -213,16 +201,6 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     (set_failure(NULL),
-                      CompileLoadInterceptor(*object, *holder, *name)),
-                     Code);
-}
-
-
 Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
                                                Handle<JSObject> receiver,
                                                Handle<JSObject> holder) {
@@ -305,17 +283,6 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<String> name) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     (set_failure(NULL),
-                      CompileLoadInterceptor(*object, *holder, *name)),
-                     Code);
-}
-
-
 Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
                                                     Handle<JSObject> receiver,
                                                     Handle<JSObject> holder) {
@@ -334,18 +301,6 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     (set_failure(NULL),
-                      CompileLoadCallback(*name, *object, *holder, *callback)),
-                     Code);
-}
-
-
 Handle<Code> StubCache::ComputeKeyedLoadCallback(
     Handle<String> name,
     Handle<JSObject> receiver,
@@ -577,19 +532,6 @@
 #define CALL_LOGGER_TAG(kind, type) \
     (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
 
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> function,
-                                                   Handle<String> name,
-                                                   CheckType check) {
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      (set_failure(NULL),
-       CompileCallConstant(*object, *holder, *function, *name, check)),
-      Code);
-}
-
-
 Handle<Code> StubCache::ComputeCallConstant(int argc,
                                             Code::Kind kind,
                                             Code::ExtraICState extra_state,
@@ -669,16 +611,6 @@
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      (set_failure(NULL), CompileCallInterceptor(*object, *holder, *name)),
-      Code);
-}
-
-
 Handle<Code> StubCache::ComputeCallInterceptor(int argc,
                                                Code::Kind kind,
                                                Code::ExtraICState extra_state,
@@ -716,20 +648,6 @@
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallGlobal(
-    Handle<JSObject> object,
-    Handle<GlobalObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      (set_failure(NULL),
-       CompileCallGlobal(*object, *holder, *cell, *function, *name)),
-      Code);
-}
-
-
 Handle<Code> StubCache::ComputeCallGlobal(int argc,
                                           Code::Kind kind,
                                           Code::ExtraICState extra_state,
@@ -906,33 +824,6 @@
 }
 
 
-// The CallStubCompiler needs a version of ComputeCallMiss that does not
-// perform GC.  This function is temporary, because the stub cache but not
-// yet the stub compiler uses handles.
-MaybeObject* StubCache::TryComputeCallMiss(int argc,
-                                           Code::Kind kind,
-                                           Code::ExtraICState extra_state) {
-  Code::Flags flags =
-      Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
-                         NORMAL, argc, OWN_MAP);
-  NumberDictionary* cache = isolate_->heap()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return cache->ValueAt(entry);
-
-  StubCompiler compiler(isolate_);
-  Code* code = NULL;
-  MaybeObject* maybe_code = compiler.TryCompileCallMiss(flags);
-  if (!maybe_code->To(&code)) return maybe_code;
-
-  NumberDictionary* new_cache = NULL;
-  MaybeObject* maybe_new_cache = cache->AtNumberPut(flags, code);
-  if (!maybe_new_cache->To(&new_cache)) return maybe_new_cache;
-  isolate_->heap()->public_set_non_monomorphic_cache(new_cache);
-
-  return code;
-}
-
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
                                               Code::Kind kind) {
@@ -1213,8 +1104,8 @@
   JSObject* recv = JSObject::cast(args[0]);
   String* name = String::cast(args[1]);
   Object* value = args[2];
+  ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
   StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
-  ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
   ASSERT(recv->HasNamedInterceptor());
   PropertyAttributes attr = NONE;
   MaybeObject* result = recv->SetPropertyWithInterceptor(
@@ -1343,33 +1234,6 @@
 }
 
 
-// TODO(kmillikin): This annoying raw pointer implementation should be
-// eliminated when the stub compiler no longer needs it.
-MaybeObject* StubCompiler::TryCompileCallMiss(Code::Flags flags) {
-  HandleScope scope(isolate());
-  int argc = Code::ExtractArgumentsCountFromFlags(flags);
-  Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
-  if (kind == Code::CALL_IC) {
-    CallIC::GenerateMiss(masm(), argc, extra_state);
-  } else {
-    KeyedCallIC::GenerateMiss(masm(), argc);
-  }
-  Object* result;
-  { MaybeObject* maybe_result = TryGetCodeWithFlags(flags, "CompileCallMiss");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  isolate()->counters()->call_megamorphic_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
-  PROFILE(isolate(),
-          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
-  return result;
-}
-
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 Handle<Code> StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
   Debug::GenerateCallICDebugBreak(masm());
@@ -1427,44 +1291,16 @@
 }
 
 
-MaybeObject* StubCompiler::TryGetCodeWithFlags(Code::Flags flags,
-                                               const char* name) {
-  // Check for allocation failures during stub compilation.
-  if (failure_->IsFailure()) return failure_;
-
-  // Create code object in the heap.
-  CodeDesc desc;
-  masm_.GetCode(&desc);
-  MaybeObject* result = heap()->CreateCode(desc, flags, masm_.CodeObject());
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_code_stubs && !result->IsFailure()) {
-    Code::cast(result->ToObjectUnchecked())->Disassemble(name);
-  }
-#endif
-  return result;
-}
-
-
-MaybeObject* StubCompiler::TryGetCodeWithFlags(Code::Flags flags,
-                                               String* name) {
-  if (FLAG_print_code_stubs && name != NULL) {
-    return TryGetCodeWithFlags(flags, *name->ToCString());
-  }
-  return TryGetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
-}
-
-
-void StubCompiler::LookupPostInterceptor(JSObject* holder,
-                                         String* name,
+void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
+                                         Handle<String> name,
                                          LookupResult* lookup) {
-  holder->LocalLookupRealNamedProperty(name, lookup);
-  if (!lookup->IsProperty()) {
-    lookup->NotFound();
-    Object* proto = holder->GetPrototype();
-    if (!proto->IsNull()) {
-      proto->Lookup(name, lookup);
-    }
-  }
+  holder->LocalLookupRealNamedProperty(*name, lookup);
+  if (lookup->IsProperty()) return;
+
+  lookup->NotFound();
+  if (holder->GetPrototype()->IsNull()) return;
+
+  holder->GetPrototype()->Lookup(*name, lookup);
 }
 
 
@@ -1477,24 +1313,6 @@
 }
 
 
-// TODO(ulan): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* LoadStubCompiler::TryGetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
-  MaybeObject* result = TryGetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::LOAD_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
-}
-
-
 Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
                                             Handle<String> name,
                                             InlineCacheState state) {
@@ -1506,26 +1324,6 @@
   return code;
 }
 
-// TODO(ulan): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* KeyedLoadStubCompiler::TryGetCode(PropertyType type,
-                                            String* name,
-                                            InlineCacheState state) {
-  Code::Flags flags = Code::ComputeFlags(
-      Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type);
-  MaybeObject* result = TryGetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
-}
-
 
 Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
                                         Handle<String> name) {
@@ -1569,39 +1367,36 @@
 }
 
 
-bool CallStubCompiler::HasCustomCallGenerator(JSFunction* function) {
-  SharedFunctionInfo* info = function->shared();
-  if (info->HasBuiltinFunctionId()) {
-    BuiltinFunctionId id = info->builtin_function_id();
+bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
+  if (function->shared()->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function->shared()->builtin_function_id();
 #define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
     CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
 #undef CALL_GENERATOR_CASE
   }
+
   CallOptimization optimization(function);
-  if (optimization.is_simple_api_call()) {
-    return true;
-  }
-  return false;
+  return optimization.is_simple_api_call();
 }
 
 
-MaybeObject* CallStubCompiler::CompileCustomCall(Object* object,
-                                                 JSObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* fname) {
+Handle<Code> CallStubCompiler::CompileCustomCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> fname) {
   ASSERT(HasCustomCallGenerator(function));
 
-  SharedFunctionInfo* info = function->shared();
-  if (info->HasBuiltinFunctionId()) {
-    BuiltinFunctionId id = info->builtin_function_id();
-#define CALL_GENERATOR_CASE(name)                           \
-    if (id == k##name) {                                    \
-      return CallStubCompiler::Compile##name##Call(object,  \
-                                                  holder,   \
-                                                  cell,     \
-                                                  function, \
-                                                  fname);   \
+  if (function->shared()->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function->shared()->builtin_function_id();
+#define CALL_GENERATOR_CASE(name)                               \
+    if (id == k##name) {                                        \
+      return CallStubCompiler::Compile##name##Call(object,      \
+                                                   holder,      \
+                                                   cell,        \
+                                                   function,    \
+                                                   fname);      \
     }
     CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
 #undef CALL_GENERATOR_CASE
@@ -1637,104 +1432,79 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGetCode(PropertyType type, String* name) {
-  int argc = arguments_.immediate();
-  Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
-                                                    type,
-                                                    extra_state_,
-                                                    cache_holder_,
-                                                    argc);
-  return TryGetCodeWithFlags(flags, name);
-}
-
-
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGetCode(JSFunction* function) {
-  String* function_name = NULL;
-  if (function->shared()->name()->IsString()) {
-    function_name = String::cast(function->shared()->name());
-  }
-  return TryGetCode(CONSTANT_FUNCTION, function_name);
-}
-
-
-MaybeObject* ConstructStubCompiler::GetCode() {
+Handle<Code> ConstructStubCompiler::GetCode() {
   Code::Flags flags = Code::ComputeFlags(Code::STUB);
-  Object* result;
-  { MaybeObject* maybe_result = TryGetCodeWithFlags(flags, "ConstructStub");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
-  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
-  GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, "ConstructStub");
+  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, "ConstructStub"));
+  GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", *code));
+  return code;
 }
 
 
 CallOptimization::CallOptimization(LookupResult* lookup) {
-  if (!lookup->IsProperty() || !lookup->IsCacheable() ||
+  if (!lookup->IsProperty() ||
+      !lookup->IsCacheable() ||
       lookup->type() != CONSTANT_FUNCTION) {
-    Initialize(NULL);
+    Initialize(Handle<JSFunction>::null());
   } else {
     // We only optimize constant function calls.
-    Initialize(lookup->GetConstantFunction());
+    Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
   }
 }
 
-CallOptimization::CallOptimization(JSFunction* function) {
+CallOptimization::CallOptimization(Handle<JSFunction> function) {
   Initialize(function);
 }
 
 
-int CallOptimization::GetPrototypeDepthOfExpectedType(JSObject* object,
-                                                      JSObject* holder) const {
-  ASSERT(is_simple_api_call_);
-  if (expected_receiver_type_ == NULL) return 0;
+int CallOptimization::GetPrototypeDepthOfExpectedType(
+    Handle<JSObject> object,
+    Handle<JSObject> holder) const {
+  ASSERT(is_simple_api_call());
+  if (expected_receiver_type_.is_null()) return 0;
   int depth = 0;
-  while (object != holder) {
-    if (object->IsInstanceOf(expected_receiver_type_)) return depth;
-    object = JSObject::cast(object->GetPrototype());
+  while (!object.is_identical_to(holder)) {
+    if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
+    object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
     ++depth;
   }
-  if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
+  if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
   return kInvalidProtoDepth;
 }
 
 
-void CallOptimization::Initialize(JSFunction* function) {
-  constant_function_ = NULL;
+void CallOptimization::Initialize(Handle<JSFunction> function) {
+  constant_function_ = Handle<JSFunction>::null();
   is_simple_api_call_ = false;
-  expected_receiver_type_ = NULL;
-  api_call_info_ = NULL;
+  expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+  api_call_info_ = Handle<CallHandlerInfo>::null();
 
-  if (function == NULL || !function->is_compiled()) return;
+  if (function.is_null() || !function->is_compiled()) return;
 
   constant_function_ = function;
   AnalyzePossibleApiFunction(function);
 }
 
 
-void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
-  SharedFunctionInfo* sfi = function->shared();
-  if (!sfi->IsApiFunction()) return;
-  FunctionTemplateInfo* info = sfi->get_api_func_data();
+void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
+  if (!function->shared()->IsApiFunction()) return;
+  Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
 
   // Require a C++ callback.
   if (info->call_code()->IsUndefined()) return;
-  api_call_info_ = CallHandlerInfo::cast(info->call_code());
+  api_call_info_ =
+      Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
 
   // Accept signatures that either have no restrictions at all or
   // only have restrictions on the receiver.
   if (!info->signature()->IsUndefined()) {
-    SignatureInfo* signature = SignatureInfo::cast(info->signature());
+    Handle<SignatureInfo> signature =
+        Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
     if (!signature->args()->IsUndefined()) return;
     if (!signature->receiver()->IsUndefined()) {
       expected_receiver_type_ =
-          FunctionTemplateInfo::cast(signature->receiver());
+          Handle<FunctionTemplateInfo>(
+              FunctionTemplateInfo::cast(signature->receiver()));
     }
   }
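
Note on the deletions above: the removed Compile*/TryGetCode wrappers existed
only to bridge the raw MaybeObject* world via CALL_HEAP_FUNCTION, which retries
an allocation-returning call after a GC when it fails. The handlified compilers
return Handle<Code> directly, so the shims are no longer needed. Roughly, the
retired idiom looked like this (a simplified sketch, not the actual macro
expansion; DoRawCall is a hypothetical stand-in):

  // Sketch of the retry-on-allocation-failure idiom the macro encapsulated.
  MaybeObject* maybe = DoRawCall();  // may return an allocation failure
  if (maybe->IsFailure()) {
    // Trigger a GC for the failed allocation, then retry the call once.
    maybe = DoRawCall();
  }
  return maybe;
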
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 11fdb89..cc42e05 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -230,10 +230,6 @@
                                Code::Kind kind,
                                Code::ExtraICState state);
 
-  MUST_USE_RESULT MaybeObject* TryComputeCallMiss(int argc,
-                                                  Code::Kind kind,
-                                                  Code::ExtraICState state);
-
   // Finds the Code object stored in the Heap::non_monomorphic_cache().
   Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
 
@@ -392,8 +388,6 @@
   Handle<Code> CompileCallArguments(Code::Flags flags);
   Handle<Code> CompileCallMiss(Code::Flags flags);
 
-  MUST_USE_RESULT MaybeObject* TryCompileCallMiss(Code::Flags flags);
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Handle<Code> CompileCallDebugBreak(Code::Flags flags);
   Handle<Code> CompileCallDebugPrepareStepIn(Code::Flags flags);
@@ -490,41 +484,10 @@
                            int save_at_depth,
                            Label* miss);
 
-  // TODO(kmillikin): Eliminate this function when the stub cache is fully
-  // handlified.
-  Register CheckPrototypes(JSObject* object,
-                           Register object_reg,
-                           JSObject* holder,
-                           Register holder_reg,
-                           Register scratch1,
-                           Register scratch2,
-                           String* name,
-                           Label* miss) {
-    return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
-                           scratch2, name, kInvalidProtoDepth, miss);
-  }
-
-  // TODO(kmillikin): Eliminate this function when the stub cache is fully
-  // handlified.
-  Register CheckPrototypes(JSObject* object,
-                           Register object_reg,
-                           JSObject* holder,
-                           Register holder_reg,
-                           Register scratch1,
-                           Register scratch2,
-                           String* name,
-                           int save_at_depth,
-                           Label* miss);
-
  protected:
   Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
   Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* TryGetCodeWithFlags(Code::Flags flags,
-                                                   const char* name);
-  MUST_USE_RESULT MaybeObject* TryGetCodeWithFlags(Code::Flags flags,
-                                                   String* name);
-
   MacroAssembler* masm() { return &masm_; }
   void set_failure(Failure* failure) { failure_ = failure; }
 
@@ -538,16 +501,16 @@
                          Handle<String> name,
                          Label* miss);
 
-  MaybeObject* GenerateLoadCallback(JSObject* object,
-                                    JSObject* holder,
-                                    Register receiver,
-                                    Register name_reg,
-                                    Register scratch1,
-                                    Register scratch2,
-                                    Register scratch3,
-                                    AccessorInfo* callback,
-                                    String* name,
-                                    Label* miss);
+  void GenerateLoadCallback(Handle<JSObject> object,
+                            Handle<JSObject> holder,
+                            Register receiver,
+                            Register name_reg,
+                            Register scratch1,
+                            Register scratch2,
+                            Register scratch3,
+                            Handle<AccessorInfo> callback,
+                            Handle<String> name,
+                            Label* miss);
 
   void GenerateLoadConstant(Handle<JSObject> object,
                             Handle<JSObject> holder,
@@ -559,19 +522,19 @@
                             Handle<String> name,
                             Label* miss);
 
-  void GenerateLoadInterceptor(JSObject* object,
-                               JSObject* holder,
+  void GenerateLoadInterceptor(Handle<JSObject> object,
+                               Handle<JSObject> holder,
                                LookupResult* lookup,
                                Register receiver,
                                Register name_reg,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3,
-                               String* name,
+                               Handle<String> name,
                                Label* miss);
 
-  static void LookupPostInterceptor(JSObject* holder,
-                                    String* name,
+  static void LookupPostInterceptor(Handle<JSObject> holder,
+                                    Handle<String> name,
                                     LookupResult* lookup);
 
   Isolate* isolate() { return isolate_; }
@@ -603,11 +566,6 @@
                                    Handle<JSObject> holder,
                                    Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback);
-
   Handle<Code> CompileLoadConstant(Handle<JSObject> object,
                                    Handle<JSObject> holder,
                                    Handle<Object> value,
@@ -617,10 +575,6 @@
                                       Handle<JSObject> holder,
                                       Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name);
-
   Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
                                  Handle<GlobalObject> holder,
                                  Handle<JSGlobalPropertyCell> cell,
@@ -628,8 +582,6 @@
                                  bool is_dont_delete);
 
  private:
-  MUST_USE_RESULT MaybeObject* TryGetCode(PropertyType type, String* name);
-
   Handle<Code> GetCode(PropertyType type, Handle<String> name);
 };
 
@@ -648,11 +600,6 @@
                                    Handle<JSObject> holder,
                                    Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback);
-
   Handle<Code> CompileLoadConstant(Handle<String> name,
                                    Handle<JSObject> object,
                                    Handle<JSObject> holder,
@@ -662,10 +609,6 @@
                                       Handle<JSObject> holder,
                                       Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name);
-
   Handle<Code> CompileLoadArrayLength(Handle<String> name);
 
   Handle<Code> CompileLoadStringLength(Handle<String> name);
@@ -687,10 +630,6 @@
   static void GenerateLoadDictionaryElement(MacroAssembler* masm);
 
  private:
-  MaybeObject* TryGetCode(PropertyType type,
-                          String* name,
-                          InlineCacheState state = MONOMORPHIC);
-
   Handle<Code> GetCode(PropertyType type,
                        Handle<String> name,
                        InlineCacheState state = MONOMORPHIC);
@@ -796,95 +735,69 @@
                                    Handle<String> name,
                                    CheckType check);
 
-  MUST_USE_RESULT MaybeObject* CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
-                                                   CheckType check);
-
   Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
                                       Handle<JSObject> holder,
                                       Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name);
-
   Handle<Code> CompileCallGlobal(Handle<JSObject> object,
                                  Handle<GlobalObject> holder,
                                  Handle<JSGlobalPropertyCell> cell,
                                  Handle<JSFunction> function,
                                  Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name);
-
-  static bool HasCustomCallGenerator(JSFunction* function);
+  static bool HasCustomCallGenerator(Handle<JSFunction> function);
 
  private:
-  // Compiles a custom call constant/global IC. For constant calls
-  // cell is NULL. Returns undefined if there is no custom call code
-  // for the given function or it can't be generated.
-  MUST_USE_RESULT MaybeObject* CompileCustomCall(Object* object,
-                                                 JSObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name);
+  // Compiles a custom call constant/global IC.  For constant calls the cell
+  // is the null handle.  Returns an empty handle if there is no custom call
+  // code for the given function.
+  Handle<Code> CompileCustomCall(Handle<Object> object,
+                                 Handle<JSObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 Handle<JSFunction> function,
+                                 Handle<String> name);
 
-#define DECLARE_CALL_GENERATOR(name)                                           \
-  MUST_USE_RESULT MaybeObject* Compile##name##Call(Object* object,             \
-                                                   JSObject* holder,           \
-                                                   JSGlobalPropertyCell* cell, \
-                                                   JSFunction* function,       \
-                                                   String* fname);
+#define DECLARE_CALL_GENERATOR(name)                                    \
+  Handle<Code> Compile##name##Call(Handle<Object> object,               \
+                                   Handle<JSObject> holder,             \
+                                   Handle<JSGlobalPropertyCell> cell,   \
+                                   Handle<JSFunction> function,         \
+                                   Handle<String> fname);
   CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
 #undef DECLARE_CALL_GENERATOR
 
-  MUST_USE_RESULT MaybeObject* CompileFastApiCall(
-      const CallOptimization& optimization,
-      Object* object,
-      JSObject* holder,
-      JSGlobalPropertyCell* cell,
-      JSFunction* function,
-      String* name);
-
-  const ParameterCount arguments_;
-  const Code::Kind kind_;
-  const Code::ExtraICState extra_state_;
-  const InlineCacheHolderFlag cache_holder_;
-
-  const ParameterCount& arguments() { return arguments_; }
+  Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
+                                  Handle<Object> object,
+                                  Handle<JSObject> holder,
+                                  Handle<JSGlobalPropertyCell> cell,
+                                  Handle<JSFunction> function,
+                                  Handle<String> name);
 
   Handle<Code> GetCode(PropertyType type, Handle<String> name);
   Handle<Code> GetCode(Handle<JSFunction> function);
 
-  // TODO(kmillikin): Eliminate these functions when the stub cache is fully
-  // handlified.
-  MUST_USE_RESULT MaybeObject* TryGetCode(PropertyType type, String* name);
-  MUST_USE_RESULT MaybeObject* TryGetCode(JSFunction* function);
+  const ParameterCount& arguments() { return arguments_; }
 
   void GenerateNameCheck(Handle<String> name, Label* miss);
 
-  void GenerateGlobalReceiverCheck(JSObject* object,
-                                   JSObject* holder,
-                                   String* name,
+  void GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<String> name,
                                    Label* miss);
 
   // Generates code to load the function from the cell checking that
   // it still contains the same function.
-  void GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                    JSFunction* function,
+  void GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,
+                                    Handle<JSFunction> function,
                                     Label* miss);
 
   // Generates a jump to CallIC miss stub.
   void GenerateMissBranch();
 
-  // TODO(kmillikin): Eliminate this function when the stub cache is fully
-  // handlified.
-  MUST_USE_RESULT MaybeObject* TryGenerateMissBranch();
+  const ParameterCount arguments_;
+  const Code::Kind kind_;
+  const Code::ExtraICState extra_state_;
+  const InlineCacheHolderFlag cache_holder_;
 };
 
 
@@ -892,10 +805,10 @@
  public:
   explicit ConstructStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
 
-  MUST_USE_RESULT MaybeObject* CompileConstructStub(JSFunction* function);
+  Handle<Code> CompileConstructStub(Handle<JSFunction> function);
 
  private:
-  MaybeObject* GetCode();
+  Handle<Code> GetCode();
 };
 
 
@@ -904,14 +817,14 @@
  public:
   explicit CallOptimization(LookupResult* lookup);
 
-  explicit CallOptimization(JSFunction* function);
+  explicit CallOptimization(Handle<JSFunction> function);
 
   bool is_constant_call() const {
-    return constant_function_ != NULL;
+    return !constant_function_.is_null();
   }
 
-  JSFunction* constant_function() const {
-    ASSERT(constant_function_ != NULL);
+  Handle<JSFunction> constant_function() const {
+    ASSERT(is_constant_call());
     return constant_function_;
   }
 
@@ -919,32 +832,32 @@
     return is_simple_api_call_;
   }
 
-  FunctionTemplateInfo* expected_receiver_type() const {
-    ASSERT(is_simple_api_call_);
+  Handle<FunctionTemplateInfo> expected_receiver_type() const {
+    ASSERT(is_simple_api_call());
     return expected_receiver_type_;
   }
 
-  CallHandlerInfo* api_call_info() const {
-    ASSERT(is_simple_api_call_);
+  Handle<CallHandlerInfo> api_call_info() const {
+    ASSERT(is_simple_api_call());
     return api_call_info_;
   }
 
   // Returns the depth of the object having the expected type in the
   // prototype chain between the two arguments.
-  int GetPrototypeDepthOfExpectedType(JSObject* object,
-                                      JSObject* holder) const;
+  int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
+                                      Handle<JSObject> holder) const;
 
  private:
-  void Initialize(JSFunction* function);
+  void Initialize(Handle<JSFunction> function);
 
   // Determines whether the given function can be called using the
   // fast api call builtin.
-  void AnalyzePossibleApiFunction(JSFunction* function);
+  void AnalyzePossibleApiFunction(Handle<JSFunction> function);
 
-  JSFunction* constant_function_;
+  Handle<JSFunction> constant_function_;
   bool is_simple_api_call_;
-  FunctionTemplateInfo* expected_receiver_type_;
-  CallHandlerInfo* api_call_info_;
+  Handle<FunctionTemplateInfo> expected_receiver_type_;
+  Handle<CallHandlerInfo> api_call_info_;
 };
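
Note on CallOptimization: this follows the same handlification pattern as the
rest of the change. A raw Object* is only safe while no allocation can happen,
because a GC may move the object; a Handle is an indirection slot that the GC
keeps up to date. A minimal sketch of the hazard (AllocateSomething stands in
for any allocating operation):

  JSFunction* raw = *function_handle;  // raw pointer into the heap
  AllocateSomething();                 // may trigger GC and move the object
  raw->shared();                       // hazard: 'raw' may now be stale

  Handle<JSFunction> safe = function_handle;
  AllocateSomething();                 // GC updates the handle's slot
  safe->shared();                      // still valid
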
 
 
diff --git a/src/v8globals.h b/src/v8globals.h
index 40ce30c..560e368 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -160,8 +160,7 @@
 class RegExpCompiler;
 class RegExpVisitor;
 class Scope;
-template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-class SerializedScopeInfo;
+class ScopeInfo;
 class Script;
 class Slot;
 class Smi;
@@ -550,6 +549,43 @@
 };
 
 
+// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
+// and immutable bindings that can be in two states: initialized and
+// uninitialized. In ES5 only immutable bindings have these two states. When
+// accessing a binding, it needs to be checked for initialization. However, in
+// the following cases the binding is initialized immediately after creation,
+// so the initialization check can always be skipped:
+// 1. Var declared local variables.
+//      var foo;
+// 2. A local variable introduced by a function declaration.
+//      function foo() {}
+// 3. Parameters
+//      function x(foo) {}
+// 4. Catch bound variables.
+//      try {} catch (foo) {}
+// 5. Function variables of named function expressions.
+//      var x = function foo() {}
+// 6. Implicit binding of 'this'.
+// 7. Implicit binding of 'arguments' in functions.
+//
+// ES5 specifies object environment records, which are introduced by ES
+// elements such as Program and WithStatement and associate identifier
+// bindings with the properties of some object. In the specification only
+// mutable bindings exist (which may be non-writable) and there is no
+// distinct initialization step. However, V8 allows const declarations in
+// global code with distinct creation and initialization steps, which are
+// represented by non-writable properties on the global object. As a result,
+// these bindings also need to be checked for initialization.
+//
+// The following enum specifies a flag indicating whether a binding needs a
+// distinct initialization step (kNeedsInitialization) or is initialized
+// immediately upon creation (kCreatedInitialized).
+enum InitializationFlag {
+  kNeedsInitialization,
+  kCreatedInitialized
+};
+
+
 enum ClearExceptionFlag {
   KEEP_EXCEPTION,
   CLEAR_EXCEPTION
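
Note on InitializationFlag: in JavaScript terms, kNeedsInitialization marks
bindings that can be observed before their initializer has run, so every
access needs a hole check; kCreatedInitialized bindings never can be. A sketch
using harmony block scoping (still behind a flag at this point):

  function f() {
    var v;        // created initialized: reads never need a check
    {
      l;          // 'l' already exists but is uninitialized: must be caught
      let l = 1;  // the binding is initialized only here
    }
  }
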
diff --git a/src/v8natives.js b/src/v8natives.js
index e6669d5..6173352 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -162,28 +162,23 @@
 function GlobalEval(x) {
   if (!IS_STRING(x)) return x;
 
-  var receiver = this;
   var global_receiver = %GlobalReceiver(global);
-
-  if (receiver == null && !IS_UNDETECTABLE(receiver)) {
-    receiver = global_receiver;
-  }
-
-  var this_is_global_receiver = (receiver === global_receiver);
   var global_is_detached = (global === global_receiver);
 
   // For consistency with JSC we require the global object passed to
   // eval to be the global object from which 'eval' originated. This
   // is not mandated by the spec.
-  if (!this_is_global_receiver || global_is_detached) {
-    throw new $EvalError('The "this" object passed to eval must ' +
+  // We only throw if the global has been detached, since we need the
+  // receiver as this-value for the call.
+  if (global_is_detached) {
+    throw new $EvalError('The "this" value passed to eval must ' +
                          'be the global object from which eval originated');
   }
 
   var f = %CompileString(x);
   if (!IS_FUNCTION(f)) return f;
 
-  return %_CallFunction(receiver, f);
+  return %_CallFunction(global_receiver, f);
 }
 
 
@@ -691,7 +686,12 @@
 
 
 // ES5 8.12.9.
-function DefineObjectProperty(obj, p, desc, should_throw) {
+function DefineOwnProperty(obj, p, desc, should_throw) {
+  if (%IsJSProxy(obj)) {
+    var attributes = FromGenericPropertyDescriptor(desc);
+    return DefineProxyProperty(obj, p, attributes, should_throw);
+  }
+
   var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
   // A false value here means that access checks failed.
   if (current_or_access === false) return void 0;
@@ -855,63 +855,6 @@
 }
 
 
-// ES5 section 15.4.5.1.
-function DefineArrayProperty(obj, p, desc, should_throw) {
-  var length_desc = GetOwnProperty(obj, "length");
-  var length = length_desc.getValue();
-
-  // Step 3 - Special handling for the length property.
-  if (p == "length") {
-    if (!desc.hasValue()) {
-      return DefineObjectProperty(obj, "length", desc, should_throw);
-    }
-    var new_length = ToUint32(desc.getValue());
-    if (new_length != ToNumber(desc.getValue())) {
-      throw new $RangeError('defineProperty() array length out of range');
-    }
-    // TODO(1756): There still are some uncovered corner cases left on how to
-    // handle changes to the length property of arrays.
-    return DefineObjectProperty(obj, "length", desc, should_throw);
-  }
-
-  // Step 4 - Special handling for array index.
-  var index = ToUint32(p);
-  if (index == ToNumber(p) && index != 4294967295) {
-    if ((index >= length && !length_desc.isWritable()) ||
-        !DefineObjectProperty(obj, p, desc, true)) {
-      if (should_throw) {
-        throw MakeTypeError("define_disallowed", [p]);
-      } else {
-        return false;
-      }
-    }
-    if (index >= length) {
-      // TODO(mstarzinger): We should actually set the value of the property
-      // descriptor here and pass it to DefineObjectProperty(). Take a look at
-      // ES5 section 15.4.5.1, step 4.e.i and 4.e.ii for details.
-      obj.length = index + 1;
-    }
-    return true;
-  }
-
-  // Step 5 - Fallback to default implementation.
-  return DefineObjectProperty(obj, p, desc, should_throw);
-}
-
-
-// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
-function DefineOwnProperty(obj, p, desc, should_throw) {
-  if (%IsJSProxy(obj)) {
-    var attributes = FromGenericPropertyDescriptor(desc);
-    return DefineProxyProperty(obj, p, attributes, should_throw);
-  } else if (IS_ARRAY(obj)) {
-    return DefineArrayProperty(obj, p, desc, should_throw);
-  } else {
-    return DefineObjectProperty(obj, p, desc, should_throw);
-  }
-}
-
-
 // ES5 section 15.2.3.2.
 function ObjectGetPrototypeOf(obj) {
   if (!IS_SPEC_OBJECT(obj))
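
Note on GlobalEval: the receiver passed to eval is no longer checked; the
compiled eval code always runs with the global object as its this-value, and
only a detached global still throws. For example, at top level:

  var geval = eval;        // indirect eval
  geval("this") === this;  // true: the call is made with the global receiver
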
diff --git a/src/variables.cc b/src/variables.cc
index d85e1b2..aa6a010 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -58,19 +58,24 @@
                    Handle<String> name,
                    VariableMode mode,
                    bool is_valid_LHS,
-                   Kind kind)
+                   Kind kind,
+                   InitializationFlag initialization_flag)
   : scope_(scope),
     name_(name),
     mode_(mode),
     kind_(kind),
     location_(UNALLOCATED),
     index_(-1),
+    initializer_position_(RelocInfo::kNoPosition),
     local_if_not_shadowed_(NULL),
     is_valid_LHS_(is_valid_LHS),
-    is_accessed_from_inner_scope_(false),
-    is_used_(false) {
-  // names must be canonicalized for fast equality checks
+    force_context_allocation_(false),
+    is_used_(false),
+    initialization_flag_(initialization_flag) {
+  // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
+  // Var declared variables never need initialization.
+  ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
 }
 
 
@@ -80,4 +85,12 @@
   return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
 }
 
+
+int Variable::CompareIndex(Variable* const* v, Variable* const* w) {
+  int x = (*v)->index();
+  int y = (*w)->index();
+  // Consider sorting them according to type as well?
+  return x - y;
+}
+
 } }  // namespace v8::internal
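
Note on CompareIndex: it has the int (*)(const T*, const T*) shape that V8's
List::Sort expects, so callers can order collected locals by slot index. A
hypothetical call site (the surrounding code is illustrative only):

  ZoneList<Variable*> stack_locals(8);
  ZoneList<Variable*> context_locals(8);
  scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
  context_locals.Sort(&Variable::CompareIndex);  // ascending slot index
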
diff --git a/src/variables.h b/src/variables.h
index 8b2d869..f20bd39 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -77,7 +77,8 @@
            Handle<String> name,
            VariableMode mode,
            bool is_valid_lhs,
-           Kind kind);
+           Kind kind,
+           InitializationFlag initialization_flag);
 
   // Printing support
   static const char* Mode2String(VariableMode mode);
@@ -92,16 +93,19 @@
 
   Handle<String> name() const { return name_; }
   VariableMode mode() const { return mode_; }
-  bool is_accessed_from_inner_scope() const {
-    return is_accessed_from_inner_scope_;
+  bool has_forced_context_allocation() const {
+    return force_context_allocation_;
   }
-  void MarkAsAccessedFromInnerScope() {
+  void ForceContextAllocation() {
     ASSERT(mode_ != TEMPORARY);
-    is_accessed_from_inner_scope_ = true;
+    force_context_allocation_ = true;
   }
   bool is_used() { return is_used_; }
   void set_is_used(bool flag) { is_used_ = flag; }
 
+  int initializer_position() { return initializer_position_; }
+  void set_initializer_position(int pos) { initializer_position_ = pos; }
+
   bool IsVariable(Handle<String> n) const {
     return !is_this() && name().is_identical_to(n);
   }
@@ -123,9 +127,7 @@
             mode_ == CONST_HARMONY);
   }
   bool binding_needs_init() const {
-    return (mode_ == LET ||
-            mode_ == CONST ||
-            mode_ == CONST_HARMONY);
+    return initialization_flag_ == kNeedsInitialization;
   }
 
   bool is_global() const;
@@ -134,8 +136,7 @@
 
   // True if the variable is named eval and not known to be shadowed.
   bool is_possibly_eval() const {
-    return IsVariable(FACTORY->eval_symbol()) &&
-        (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+    return IsVariable(FACTORY->eval_symbol());
   }
 
   Variable* local_if_not_shadowed() const {
@@ -149,12 +150,17 @@
 
   Location location() const { return location_; }
   int index() const { return index_; }
+  InitializationFlag initialization_flag() const {
+    return initialization_flag_;
+  }
 
   void AllocateTo(Location location, int index) {
     location_ = location;
     index_ = index;
   }
 
+  static int CompareIndex(Variable* const* v, Variable* const* w);
+
  private:
   Scope* scope_;
   Handle<String> name_;
@@ -162,6 +168,7 @@
   Kind kind_;
   Location location_;
   int index_;
+  int initializer_position_;
 
   // If this field is set, this variable references the stored locally bound
   // variable, but it might be shadowed by variable bindings introduced by
@@ -173,8 +180,9 @@
   bool is_valid_LHS_;
 
   // Usage info.
-  bool is_accessed_from_inner_scope_;  // set by variable resolver
+  bool force_context_allocation_;  // set by variable resolver
   bool is_used_;
+  InitializationFlag initialization_flag_;
 };
 
 
diff --git a/src/version.cc b/src/version.cc
index ecb2fbd..683524b 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     7
-#define BUILD_NUMBER      3
+#define BUILD_NUMBER      4
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index f62c517..8a00b89 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -4150,7 +4150,8 @@
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   Factory* factory = masm->isolate()->factory();
@@ -4226,7 +4227,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -4253,7 +4255,8 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
@@ -5520,70 +5523,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* StringDictionaryLookupStub::TryGenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register properties,
-    String* name,
-    Register r0) {
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // r0 points to properties hash.
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = r0;
-    // Capacity is smi 2^n.
-    __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
-    __ decl(index);
-    __ and_(index,
-            Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
-
-    Register entity_name = r0;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    __ movq(entity_name, Operand(properties,
-                                 index,
-                                 times_pointer_size,
-                                 kElementsStartOffset - kHeapObjectTag));
-    __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
-    __ j(equal, done);
-
-    // Stop if found the property.
-    __ Cmp(entity_name, Handle<String>(name));
-    __ j(equal, miss);
-
-    // Check if the entry name is not a symbol.
-    __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-             Immediate(kIsSymbolMask));
-    __ j(zero, miss);
-  }
-
-  StringDictionaryLookupStub stub(properties,
-                                  r0,
-                                  r0,
-                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
-  __ Push(Handle<Object>(name));
-  __ push(Immediate(name->Hash()));
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
-  __ testq(r0, r0);
-  __ j(not_zero, miss);
-  __ jmp(done);
-  return result;
-}
-
-
 // Probe the string dictionary in the |elements| register. Jump to the
 // |done| label if a property with the given name is found leaving the
 // index into the dictionary in |r1|. Jump to the |miss| label
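The removed TryGenerateNegativeLookup inlines the first kInlinedProbes steps of the dictionary probe sequence before falling back to the full stub. Restated as plain C++ (illustrative only; the function and its parameters are not V8 API):

  // Computes the slot offsets the inlined fast path examines for |hash| in
  // a dictionary of |capacity| entries (capacity is a power of two).  The
  // real stub stops early on an undefined slot (name definitely absent) or
  // on the name itself (definitely present); deleted entries hold null and
  // match neither case, so probing simply continues past them.
  void ProbeSequence(unsigned hash, unsigned capacity, unsigned probes,
                     unsigned* out_offsets) {
    unsigned mask = capacity - 1;
    for (unsigned i = 0; i < probes; i++) {
      unsigned index = (hash + i + i * i) & mask;  // quadratic probing
      out_offsets[i] = index * 3;  // kEntrySize == 3 words per entry
    }
  }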
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index ffa3f4d..34435d7 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -430,16 +430,6 @@
                                      Handle<String> name,
                                      Register r0);
 
-  // TODO(kmillikin): Eliminate this function when the stub cache is fully
-  // handlified.
-  MUST_USE_RESULT static MaybeObject* TryGenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register properties,
-      String* name,
-      Register r0);
-
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
                                      Label* done,
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index a4880d2..4090ce4 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -254,6 +254,12 @@
            xmm0);
   __ jmp(&entry);
   __ bind(&convert_hole);
+
+  if (FLAG_debug_code) {
+    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+    __ Assert(equal, "object found in smi-only array");
+  }
+
   __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
   __ bind(&entry);
   __ decq(r9);
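The new FLAG_debug_code block makes the loop invariant explicit: while a smi-only FixedArray is being converted to a FixedDoubleArray, the only legal non-smi element is the hole. The same check in scalar form (a sketch, not V8 code):

  #include <cassert>

  // Any non-smi element reaching the convert_hole path must be the hole
  // sentinel; anything else means a smi-only array held a real object.
  void CheckSmiOnlyElement(bool is_smi, bool is_the_hole) {
    if (!is_smi) {
      assert(is_the_hole && "object found in smi-only array");
    }
  }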
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index bf640db..f9186c7 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -44,11 +44,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -383,7 +378,7 @@
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -405,7 +400,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -438,7 +433,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -497,7 +492,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -561,7 +556,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -653,7 +648,7 @@
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -664,13 +659,7 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ CompareRoot(rax, Heap::kTrueValueRootIndex);
     Split(equal, if_true, if_false, NULL);
@@ -687,8 +676,8 @@
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
-  bool binding_needs_init =
-      mode == CONST || mode == CONST_HARMONY || mode == LET;
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
       ++(*global_count);
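The new (function == NULL) guard encodes that a declaration carrying a function value is initialized in the same step that creates the binding, so it never needs the hole marker; only value-less CONST/CONST_HARMONY/LET declarations do. As a standalone predicate (sketch; the abbreviated enum stands in for v8's VariableMode):

  enum VariableMode { VAR, CONST, CONST_HARMONY, LET };  // abbreviated

  bool BindingNeedsInit(bool has_function_value, VariableMode mode) {
    if (has_function_value) return false;  // initialized at declaration
    return mode == CONST || mode == CONST_HARMONY || mode == LET;
  }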
@@ -1251,27 +1240,59 @@
     case Variable::LOCAL:
     case Variable::CONTEXT: {
       Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
-      if (!var->binding_needs_init()) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        Label done;
-        GetVar(rax, var);
-        __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-        __ j(not_equal, &done, Label::kNear);
-        if (var->mode() == LET || var->mode() == CONST_HARMONY) {
-          // Throw a reference error when using an uninitialized let/const
-          // binding in harmony mode.
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-        } else {
-          // Uninitalized const bindings outside of harmony mode are unholed.
-          ASSERT(var->mode() == CONST);
-          __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        // Check that we always have a valid source position.
+        ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+        ASSERT(proxy->position() != RelocInfo::kNoPosition);
+        bool skip_init_check =
+            var->mode() != CONST &&
+            var->scope()->DeclarationScope() == scope()->DeclarationScope() &&
+            var->initializer_position() < proxy->position();
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          Label done;
+          GetVar(rax, var);
+          __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+          __ j(not_equal, &done, Label::kNear);
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            __ Push(var->name());
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+            ASSERT(var->mode() == CONST);
+            __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+          }
+          __ bind(&done);
+          context()->Plug(rax);
+          break;
         }
-        __ bind(&done);
-        context()->Plug(rax);
       }
+      context()->Plug(var);
       break;
     }
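The skip condition above, extracted into a standalone predicate (sketch; the enum again abbreviates v8's VariableMode). The comments repeat the two cases from the block comment that can never skip the check:

  enum VariableMode { VAR, CONST, CONST_HARMONY, LET };  // abbreviated

  // Never skip for non-harmony CONST:
  //   if (false) { const x; }; var y = x;
  // Never skip across declaration scopes:
  //   function() { f(); let x = 1; function f() { x = 2; } }
  bool CanSkipInitCheck(VariableMode mode,
                        bool same_declaration_scope,
                        int initializer_position,
                        int use_position) {
    return mode != CONST &&
           same_declaration_scope &&
           initializer_position < use_position;
  }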
 
@@ -1688,14 +1709,14 @@
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -2099,8 +2120,7 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(rsp, arg_count * kPointerSize));
@@ -2117,9 +2137,7 @@
       FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
   __ Push(Smi::FromInt(strict_mode));
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 }
 
 
@@ -2150,27 +2168,10 @@
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly in
-      // generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(rax);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and resolve
       // eval.
       __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in rax (function) and
       // rdx (receiver). Touch up the stack with the right values.
@@ -2289,7 +2290,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2301,7 +2303,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ JumpIfSmi(rax, if_true);
   __ jmp(if_false);
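EmitIsSmi is the first of many emitters below rewritten to take the CallRuntime node itself rather than a pre-extracted argument list: PrepareForBailoutBeforeSplit now needs the expression to record deopt information. The shared shape, with trivial stand-ins for the V8 types (sketch):

  #include <vector>

  struct Expression {};
  struct CallRuntime {
    std::vector<Expression*> args_;
    std::vector<Expression*>* arguments() { return &args_; }
  };

  void PrepareForBailoutBeforeSplit(CallRuntime* /*expr*/) { /* record ids */ }

  void EmitPredicate(CallRuntime* expr) {  // was (ZoneList<Expression*>* args)
    std::vector<Expression*>* args = expr->arguments();
    (void)args;  // ...visit args->at(0) and emit the actual test...
    PrepareForBailoutBeforeSplit(expr);  // now possible: we hold the node
  }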
 
@@ -2309,7 +2311,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2321,7 +2324,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
   Split(non_negative_smi, if_true, if_false, fall_through);
 
@@ -2329,7 +2332,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2353,14 +2357,15 @@
   __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ j(below, if_false);
   __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2374,14 +2379,15 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2397,7 +2403,7 @@
   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2405,7 +2411,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2481,12 +2488,13 @@
          Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2500,14 +2508,15 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2521,14 +2530,15 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2542,7 +2552,7 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2550,8 +2560,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2574,14 +2584,15 @@
   __ bind(&check_frame_marker);
   __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
          Smi::FromInt(StackFrame::CONSTRUCT));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2597,14 +2608,15 @@
 
   __ pop(rbx);
   __ cmpq(rax, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in rdx and the formal
@@ -2618,8 +2630,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -2641,7 +2653,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2701,7 +2714,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2709,6 +2722,7 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2721,8 +2735,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
@@ -2766,9 +2780,10 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2778,9 +2793,10 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2791,7 +2807,8 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2809,8 +2826,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2820,7 +2838,8 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
@@ -2847,7 +2866,8 @@
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -2859,7 +2879,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2877,7 +2898,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2924,7 +2946,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2973,7 +2996,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -2985,7 +3009,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -2997,10 +3022,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3008,10 +3034,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3019,10 +3046,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3030,8 +3058,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3039,7 +3068,8 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3058,8 +3088,9 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3069,7 +3100,8 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3164,7 +3196,8 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3220,7 +3253,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = rax;
@@ -3258,7 +3292,8 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3272,7 +3307,7 @@
 
   __ testl(FieldOperand(rax, String::kHashFieldOffset),
            Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ j(zero, if_true);
   __ jmp(if_false);
 
@@ -3280,7 +3315,8 @@
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3296,10 +3332,11 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, return_result, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
@@ -3678,17 +3715,41 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+        } else {
+          __ PushRoot(Heap::kTrueValueRootIndex);
+        }
+        __ jmp(&done, Label::kNear);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+        } else {
+          __ PushRoot(Heap::kFalseValueRootIndex);
+        }
+        __ bind(&done);
       }
       break;
     }
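In summary, !expr now has two explicit lowerings: test contexts just swap the branch targets, while value contexts compile the operand for control and materialize the boolean at per-branch bailout points. The control flow in miniature (sketch):

  // Test context: VisitForControl with the true/false labels swapped.
  // Value context: a true operand branches to materialize_false (bailout
  // recorded at MaterializeFalseId) and vice versa; both join at |done|.
  bool EmitUnaryNot(bool operand, bool is_test_context) {
    if (is_test_context) return !operand;
    return operand ? false   // materialize_false path
                   : true;   // materialize_true path
  }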
@@ -3960,12 +4021,13 @@
     context()->Plug(rax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Expression* sub_expr,
                                                  Handle<String> check) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -3975,9 +4037,9 @@
                          &if_true, &if_false, &fall_through);
 
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(rax, if_true);
@@ -4061,7 +4123,7 @@
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(rax, Heap::kTrueValueRootIndex);
       Split(equal, if_true, if_false, fall_through);
       break;
@@ -4070,7 +4132,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ testq(rax, rax);
        // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
@@ -4122,7 +4184,7 @@
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
 
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ testq(rax, rax);
       Split(cc, if_true, if_false, fall_through);
     }
@@ -4145,7 +4207,7 @@
                          &if_true, &if_false, &fall_through);
 
   VisitForAccumulatorValue(sub_expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Heap::RootListIndex nil_value = nil == kNullValue ?
       Heap::kNullValueRootIndex :
       Heap::kUndefinedValueRootIndex;
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 0af2ce4..598f890 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -745,7 +745,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasSideEffects()) {
+  if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -757,7 +757,8 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
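MarkAsCall now keys the simulate/environment bookkeeping on HasObservableSideEffects() rather than HasSideEffects(): only effects visible to JavaScript force a simulate after the call. The predicate in miniature (sketch with a stand-in type):

  struct HInstr {
    bool observable;
    bool HasObservableSideEffects() const { return observable; }
  };

  // Mirrors the needs_environment computation above.
  bool NeedsEnvironment(bool can_deoptimize_eagerly, const HInstr& h) {
    return can_deoptimize_eagerly || !h.HasObservableSideEffects();
  }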
@@ -815,28 +816,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
-                                   HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineSameAsFirst(new LBitI(op, left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), rdx);
-    LOperand* right = UseFixed(instr->right(), rax);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
-    return MarkAsCall(DefineFixed(result, rax), instr);
-  }
-}
-
-
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsTagged()) {
@@ -1239,8 +1218,24 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), rdx);
+    LOperand* right = UseFixed(instr->right(), rax);
+    LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+    return MarkAsCall(DefineFixed(result, rax), instr);
+  }
 }
 
 
@@ -1253,16 +1248,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 20a6937..b542071 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -793,18 +793,15 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
+  LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return hydrogen()->op(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
-  Token::Value op_;
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
 };
 
 
@@ -2181,7 +2178,6 @@
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index e3d4634..5b81fa6 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -530,33 +530,12 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
-  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
-  MaybeObject* result = stub->TryGetCode();
-  if (!result->IsFailure()) {
-    call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
-         RelocInfo::CODE_TARGET);
-  }
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub) {
   ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
-  MaybeObject* result = stub->TryGetCode();
-  if (!result->IsFailure()) {
-    jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
-        RelocInfo::CODE_TARGET);
-  }
-  return result;
-}
-
-
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
@@ -610,12 +589,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
-                                            int num_arguments) {
-  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // If the expected number of arguments of the runtime function is
@@ -637,26 +610,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
-                                            int num_arguments) {
-  if (f->nargs >= 0 && f->nargs != num_arguments) {
-    IllegalOperation(num_arguments);
-    // Since we did not call the stub, there was no allocation failure.
-    // Return some non-failure object.
-    return HEAP->undefined_value();
-  }
-
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(rax, num_arguments);
-  LoadAddress(rbx, ExternalReference(f, isolate()));
-  CEntryStub ces(f->result_size);
-  return TryCallStub(&ces);
-}
-
-
 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                            int num_arguments) {
   Set(rax, num_arguments);
@@ -686,24 +639,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // ----------- S t a t e -------------
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : argument num_arguments - 1
-  //  ...
-  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
-  // -----------------------------------
-
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(rax, num_arguments);
-  return TryJumpToExternalReference(ext, result_size);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -713,15 +648,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
-                                                int num_arguments,
-                                                int result_size) {
-  return TryTailCallExternalReference(ExternalReference(fid, isolate()),
-                                      num_arguments,
-                                      result_size);
-}
-
-
 static int Offset(ExternalReference ref0, ExternalReference ref1) {
   int64_t offset = (ref0.address() - ref1.address());
   // Check that fits into int.
@@ -744,8 +670,8 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ApiFunction* function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+                                              int stack_space) {
   Label empty_result;
   Label prologue;
   Label promote_scheduled_exception;
@@ -775,8 +701,7 @@
   movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
   addl(Operand(base_reg, kLevelOffset), Immediate(1));
   // Call the api function!
-  movq(rax,
-       reinterpret_cast<int64_t>(function->address()),
+  movq(rax, reinterpret_cast<int64_t>(function_address),
        RelocInfo::RUNTIME_ENTRY);
   call(rax);
 
@@ -808,11 +733,7 @@
   ret(stack_space * kPointerSize);
 
   bind(&promote_scheduled_exception);
-  MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
-                                           0, 1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
 
   bind(&empty_result);
   // It was zero; the result is undefined.
@@ -833,8 +754,6 @@
   call(rax);
   movq(rax, prev_limit_reg);
   jmp(&leave_exit_frame);
-
-  return result;
 }
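With stub generation no longer able to fail with a retry-after-GC, CallApiFunctionAndReturn can take the callback as a raw Address; as the comment above notes, it is a foreign pointer outside V8's heap, so no handle is involved. The call itself reduces to (sketch; both typedefs are stand-ins):

  typedef unsigned char* Address;   // stand-in for v8::internal::Address
  typedef void (*ApiCallback)();

  void CallThroughRawAddress(Address function_address) {
    // The generated code does the equivalent with
    //   movq rax, <address>; call rax
    ApiCallback callback = reinterpret_cast<ApiCallback>(function_address);
    callback();
  }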
 
 
@@ -847,15 +766,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& ext, int result_size) {
-  // Set the entry point and jump to the C entry runtime stub.
-  LoadAddress(rbx, ext);
-  CEntryStub ces(result_size);
-  return TryTailCallStub(&ces);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
@@ -3112,7 +3022,7 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
@@ -3121,7 +3031,7 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  Move(rdi, Handle<JSFunction>(function));
+  Move(rdi, function);
   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // We call indirectly through the code field in the function to
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index f5f81b1..3a655ef 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -357,7 +357,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
@@ -1107,19 +1107,9 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -1129,19 +1119,9 @@
   // Call a runtime function and save the value of XMM registers.
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
-  // Call a runtime function, returning the CodeStub object called.
-  // Try to generate the stub code if necessary.  Do not perform a GC
-  // but instead return a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
-                                              int num_arguments);
-
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
-  // Convenience function: Same as above, but takes the fid instead.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
-                                              int num_arguments);
-
   // Convenience function: call an external reference.
   void CallExternalReference(const ExternalReference& ext,
                              int num_arguments);
@@ -1153,38 +1133,26 @@
                                  int num_arguments,
                                  int result_size);
 
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
-  MUST_USE_RESULT  MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
-                                                   int num_arguments,
-                                                   int result_size);
-
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& ext, int result_size);
 
-  // Jump to a runtime routine.
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
-                                          int result_size);
-
-  // Prepares stack to put arguments (aligns and so on).
-  // WIN64 calling convention requires to put the pointer to the return value
-  // slot into rcx (rcx must be preserverd until TryCallApiFunctionAndReturn).
-  // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
+  // Prepares stack to put arguments (aligns and so on).  WIN64 calling
+  // convention requires putting the pointer to the return value slot into
+  // rcx (rcx must be preserved until CallApiFunctionAndReturn).  Saves
+  // context (rsi).  Clobbers rax.  Allocates arg_stack_space * kPointerSize
   // inside the exit frame (not GCed) accessible via StackSpaceOperand.
   void PrepareCallApiFunction(int arg_stack_space);
 
-  // Calls an API function. Allocates HandleScope, extracts
-  // returned value from handle and propagates exceptions.
-  // Clobbers r14, r15, rbx and caller-save registers. Restores context.
-  // On return removes stack_space * kPointerSize (GCed).
-  MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
-      ApiFunction* function, int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Clobbers r14, r15, rbx and
+  // caller-save registers.  Restores context.  On return removes
+  // stack_space * kPointerSize (GCed).
+  void CallApiFunctionAndReturn(Address function_address, int stack_space);
 
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, arguments must be stored in esp[0], esp[4],
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 8af1bf2..867c71a 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -128,60 +128,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateDictionaryNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss_label,
-    Register receiver,
-    String* name,
-    Register r0,
-    Register r1) {
-  ASSERT(name->IsSymbol());
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
-  __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  __ testb(FieldOperand(r0, Map::kBitFieldOffset),
-           Immediate(kInterceptorOrAccessCheckNeededMask));
-  __ j(not_zero, miss_label);
-
-  // Check that receiver is a JSObject.
-  __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
-  __ j(below, miss_label);
-
-  // Load properties array.
-  Register properties = r0;
-  __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
-  // Check that the properties array is a dictionary.
-  __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
-                 Heap::kHashTableMapRootIndex);
-  __ j(not_equal, miss_label);
-
-  Label done;
-  MaybeObject* result = StringDictionaryLookupStub::TryGenerateNegativeLookup(
-      masm,
-      miss_label,
-      &done,
-      properties,
-      name,
-      r1);
-  if (result->IsFailure()) return result;
-
-  __ bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1);
-
-  return result;
-}
-
-
 void StubCache::GenerateProbe(MacroAssembler* masm,
                               Code::Flags flags,
                               Register receiver,
@@ -259,7 +205,10 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ Move(prototype, isolate->global());
@@ -267,8 +216,8 @@
           prototype);
   __ j(not_equal, miss);
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(isolate->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ Move(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -383,11 +332,11 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
-  __ Move(kScratchRegister, Handle<Object>(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  __ Move(kScratchRegister, interceptor);
   __ push(kScratchRegister);
   __ push(receiver);
   __ push(holder);
@@ -395,11 +344,12 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -453,9 +403,9 @@
 
 
 // Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
-                                        const CallOptimization& optimization,
-                                        int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                int argc) {
   // ----------- S t a t e -------------
   //  -- rsp[0]              : return address
   //  -- rsp[8]              : object passing the type check
@@ -470,29 +420,25 @@
   //  -- rsp[(argc + 4) * 8] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ Move(rdi, Handle<JSFunction>(function));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ Move(rdi, function);
   __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
   __ movq(Operand(rsp, 2 * kPointerSize), rdi);
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ Move(rcx, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ Move(rcx, api_call_info);
     __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
     __ movq(Operand(rsp, 3 * kPointerSize), rbx);
   } else {
-    __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(call_data));
+    __ Move(Operand(rsp, 3 * kPointerSize), call_data);
   }
 
   // Prepare arguments.
   __ lea(rbx, Operand(rsp, 3 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
 #ifdef _WIN64
   // Win64 uses the first register (rcx) for the returned value.
   Register arguments_arg = rdx;
@@ -515,12 +461,11 @@
 
   // v8::InvocationCallback's argument.
   __ lea(arguments_arg, StackSpaceOperand(0));
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm->TryCallApiFunctionAndReturn(&fun,
-                                           argc + kFastApiCallArguments + 1);
+
+  // The function address is a foreign pointer outside V8's heap.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  __ CallApiFunctionAndReturn(function_address,
+                              argc + kFastApiCallArguments + 1);
 }
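
With TryCallApiFunctionAndReturn gone, the stub hands the raw callback address
straight to CallApiFunctionAndReturn: the target is plain C++ code outside the
heap, so there is no object here for the GC to move. A hedged sketch of the
kind of function whose address v8::ToCData<Address> extracts (v8.h API of this
era; the name is illustrative):

    static v8::Handle<v8::Value> FastCallback(const v8::Arguments& args) {
      // The stub lays out the stack so that 'args' sees the receiver, holder
      // and call data pushed above, then jumps directly to this address.
      return v8::Integer::New(42);
    }
    // Installed through v8::FunctionTemplate::New(FastCallback, data,
    // signature); the signature is what makes the prototype-depth check in
    // CompileFastApiCall possible.
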
 
 
@@ -535,16 +480,16 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -552,45 +497,27 @@
     __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();  // Success.
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
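
In outline, Compile() now splits on whether the post-interceptor lookup
resolved to a compile-time constant function, with no failure plumbing left.
A condensed sketch of the two paths, grounded in this hunk:

    CallOptimization optimization(lookup);
    if (optimization.is_constant_call()) {
      // Known JSFunction: check both prototype chains, probe the
      // interceptor, then invoke the cached constant (possibly through
      // GenerateFastApiCall).
    } else {
      // Unknown target: push the interceptor arguments and call the
      // IC_Utility(IC::kLoadPropertyWithInterceptorForCall) runtime entry.
    }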
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -599,16 +526,14 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 =
-          optimization.GetPrototypeDepthOfExpectedType(object,
-                                                       interceptor_holder);
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 =
-            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                         lookup->holder());
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
       }
-      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                             (depth2 != kInvalidProtoDepth);
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     Counters* counters = masm->isolate()->counters();
@@ -624,9 +549,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver,
-                                        interceptor_holder, scratch1,
-                                        scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -639,10 +564,11 @@
 
     // Check that the maps from the interceptor's holder to the constant
     // function's holder haven't changed, so the cached function is still valid.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -653,10 +579,7 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result = GenerateFastApiCall(masm,
-                                                optimization,
-                                                arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
@@ -677,33 +600,27 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
     }
-
-    return masm->isolate()->heap()->undefined_value();  // Success.
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
     FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -719,7 +636,7 @@
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Label* interceptor_succeeded) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
@@ -758,10 +675,9 @@
 
 
 void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
-  Code* code = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
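
The new form uses the per-builtin accessors on the Builtins class, which hand
out a Handle<Code> aimed at the builtins table slot, so no raw Code* escapes.
A hedged sketch of what such a generated accessor presumably looks like in
this tree's builtins.h:

    Handle<Code> Builtins::KeyedLoadIC_MissForceGeneric() {
      Code** code_address = reinterpret_cast<Code**>(
          builtin_address(kKeyedLoadIC_MissForceGeneric));
      return Handle<Code>(code_address);  // slot is updated if code moves
    }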
 
 
@@ -867,28 +783,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
-  ASSERT(cell->value()->IsTheHole());
-  __ Move(scratch, Handle<Object>(cell));
-  __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-         masm->isolate()->factory()->the_hole_value());
-  __ j(not_equal, miss);
-  return cell;
-}
-
-
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
 static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -910,35 +804,6 @@
   }
 }
 
-
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MUST_USE_RESULT static MaybeObject* TryGenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
-    if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = TryGenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
-    }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
-  }
-  return NULL;
-}
-
-
 #undef __
 #define __ ACCESS_MASM((masm()))
 
@@ -1055,148 +920,6 @@
 }
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
-                                       Register object_reg,
-                                       JSObject* holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       String* name,
-                                       int save_at_depth,
-                                       Label* miss) {
-  // Make sure there's no overlap between holder and object registers.
-  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
-  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
-         && !scratch2.is(scratch1));
-
-  // Keep track of the current object in register reg.  On the first
-  // iteration, reg is an alias for object_reg, on later iterations,
-  // it is an alias for holder_reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  if (save_at_depth == depth) {
-    __ movq(Operand(rsp, kPointerSize), object_reg);
-  }
-
-  // Check the maps in the prototype chain.
-  // Traverse the prototype chain from the object and do map checks.
-  JSObject* current = object;
-  while (current != holder) {
-    depth++;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
-    if (!current->HasFastProperties() &&
-        !current->IsJSGlobalObject() &&
-        !current->IsJSGlobalProxy()) {
-      if (!name->IsSymbol()) {
-        MaybeObject* lookup_result = heap()->LookupSymbol(name);
-        if (lookup_result->IsFailure()) {
-          set_failure(Failure::cast(lookup_result));
-          return reg;
-        } else {
-          name = String::cast(lookup_result->ToObjectUnchecked());
-        }
-      }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
-             StringDictionary::kNotFound);
-
-      MaybeObject* negative_lookup =
-          TryGenerateDictionaryNegativeLookup(masm(),
-                                              miss,
-                                              reg,
-                                              name,
-                                              scratch1,
-                                              scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
-
-      __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      __ Cmp(scratch1, Handle<Map>(current->map()));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
-        // Restore scratch register to be the map of the object.
-        // We load the prototype from the map in the scratch register.
-        __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-
-    } else {
-      // Check the map of the current object.
-      __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
-          Handle<Map>(current->map()));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-      }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ Move(reg, Handle<JSObject>(prototype));
-    }
-
-    if (save_at_depth == depth) {
-      __ movq(Operand(rsp, kPointerSize), reg);
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-  }
-
-  // Check the holder map.
-  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
-  __ j(not_equal, miss);
-
-  // Log the check depth.
-  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  // Perform security check for access to the global object and return
-  // the holder register.
-  ASSERT(current == holder);
-  ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-  if (current->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
-
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-  MaybeObject* result = TryGenerateCheckPropertyCells(masm(),
-                                                      object,
-                                                      holder,
-                                                      name,
-                                                      scratch1,
-                                                      miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
-
-  // Return the register containing the holder.
-  return reg;
-}
-
-
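
The block removed above is the raw-pointer CheckPrototypes; its handlified
replacement enforces the same invariant. Illustrative pseudo-C++ for what the
emitted checks encode (not code from this change):

    bool ChainStillValid(JSObject* receiver, JSObject* holder,
                         Map** maps_recorded_at_compile_time) {
      int i = 0;
      for (JSObject* current = receiver;;
           current = JSObject::cast(current->GetPrototype())) {
        if (current->map() != maps_recorded_at_compile_time[i++])
          return false;  // generated code jumps to the miss label
        if (current == holder) return true;  // holder map checked last
      }
    }
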
 void StubCompiler::GenerateLoadField(Handle<JSObject> object,
                                      Handle<JSObject> holder,
                                      Register receiver,
@@ -1219,25 +942,22 @@
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1,
-                      scratch2, scratch3, name, miss);
-
-  Handle<AccessorInfo> callback_handle(callback);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Insert additional parameters into the stack frame above return address.
   ASSERT(!scratch2.is(reg));
@@ -1245,11 +965,11 @@
 
   __ push(receiver);  // receiver
   __ push(reg);  // holder
-  if (heap()->InNewSpace(callback_handle->data())) {
-    __ Move(scratch1, callback_handle);
+  if (heap()->InNewSpace(callback->data())) {
+    __ Move(scratch1, callback);
     __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));  // data
   } else {
-    __ Push(Handle<Object>(callback_handle->data()));
+    __ Push(Handle<Object>(callback->data()));
   }
   __ push(name_reg);  // name
   // Save a pointer to where we pushed the arguments pointer.
@@ -1268,10 +988,6 @@
   __ movq(name_arg, rsp);
   __ push(scratch2);  // Restore return address.
 
-  // Do call through the api.
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
   // 3-element array for v8::Arguments::values_ plus a handler for the name.
   const int kStackSpace = 4;
 
@@ -1288,11 +1004,8 @@
   // could be used to pass arguments.
   __ lea(accessor_info_arg, StackSpaceOperand(0));
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  __ CallApiFunctionAndReturn(getter_address, kStackSpace);
 }
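
As in GenerateFastApiCall, the getter address now reaches
CallApiFunctionAndReturn directly. A hedged sketch of the embedder-side getter
it targets (v8.h API of this era; names illustrative):

    static v8::Handle<v8::Value> XGetter(v8::Local<v8::String> property,
                                         const v8::AccessorInfo& info) {
      // 'info' wraps the receiver, holder and data words pushed above.
      return v8::Integer::New(7);
    }
    // Registered via:
    //   object_template->SetAccessor(v8::String::New("x"), XGetter);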
 
 
@@ -1318,15 +1031,15 @@
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1342,9 +1055,9 @@
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+               lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1398,10 +1111,10 @@
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1420,9 +1133,8 @@
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1431,7 +1143,7 @@
       __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(holder_reg);
-      __ Move(holder_reg, Handle<AccessorInfo>(callback));
+      __ Move(holder_reg, callback);
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
       __ push(holder_reg);
       __ push(name_reg);
@@ -1468,9 +1180,9 @@
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1483,7 +1195,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(rdx, miss);
   }
 
@@ -1492,15 +1204,16 @@
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
-  __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
+  __ Move(rdi, cell);
   __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(function)) {
+  if (heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1513,11 +1226,10 @@
     // Check the shared function info. Make sure it hasn't changed.
     __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
     __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
-    __ j(not_equal, miss);
   } else {
-    __ Cmp(rdi, Handle<JSFunction>(function));
-    __ j(not_equal, miss);
+    __ Cmp(rdi, function);
   }
+  __ j(not_equal, miss);
 }
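
This hunk exercises most of the handle idioms the conversion relies on. For
reference, a condensed sketch of their semantics as defined by this tree's
src/handles.h:

    Handle<JSFunction> function(raw_function);   // GC-safe indirection
    JSFunction* raw = *function;                 // operator* -> raw pointer
    function->shared();                          // operator-> forwards
    Handle<Code> none = Handle<Code>::null();
    ASSERT(none.is_null());                      // "no result" sentinel
    ASSERT(function.is_identical_to(function));  // same referenced object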
 
 
@@ -1530,20 +1242,6 @@
 }
 
 
-// TODO(kmillikin): Eliminate this function when the stub cache is fully
-// handlified.
-MaybeObject* CallStubCompiler::TryGenerateMissBranch() {
-  MaybeObject* maybe_obj =
-      isolate()->stub_cache()->TryComputeCallMiss(arguments().immediate(),
-                                                  kind_,
-                                                  extra_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
-}
-
-
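
Callers below now use the handlified GenerateMissBranch() instead. A hedged
sketch of its presumable shape, mirroring the removed function minus the
failure plumbing:

    void CallStubCompiler::GenerateMissBranch() {
      Handle<Code> code =
          isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                   kind_, extra_state_);
      __ Jump(code, RelocInfo::CODE_TARGET);
    }
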
 Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
                                                 Handle<JSObject> holder,
                                                 int index,
@@ -1602,11 +1300,12 @@
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1616,11 +1315,10 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -1629,14 +1327,8 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  CheckPrototypes(JSObject::cast(object),
-                  rdx,
-                  holder,
-                  rbx,
-                  rax,
-                  rdi,
-                  name,
-                  &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                  name, &miss);
 
   if (argc == 0) {
     // Noop, return the length.
@@ -1698,8 +1390,8 @@
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ movq(Operand(rdx, 0), rcx);
 
-      __ RecordWrite(
-          rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+      __ RecordWrite(rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
 
       __ Integer32ToSmi(rax, rax);  // Return new length as smi.
       __ ret((argc + 1) * kPointerSize);
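
The reflowed RecordWrite is the write barrier for the element store just
above: writing a pointer into the elements array may create an old-to-new
reference that the next scavenge has to know about. Conceptually (illustrative
pseudo-C++, not this change's code):

    void StorePointer(Heap* heap, HeapObject* host, Object** slot,
                      Object* value) {
      *slot = value;
      if (heap->InNewSpace(value) && !heap->InNewSpace(host)) {
        // Record the slot so the scavenger can find and update it.
        heap->store_buffer()->Mark(reinterpret_cast<Address>(slot));
      }
    }
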
@@ -1779,19 +1471,19 @@
   }
 
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1801,11 +1493,10 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss, return_undefined, call_builtin;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -1814,9 +1505,8 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  CheckPrototypes(JSObject::cast(object), rdx,
-                  holder, rbx,
-                  rax, rdi, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                  name, &miss);
 
   // Get the elements array of the object.
   __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1862,20 +1552,19 @@
       1);
 
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1885,7 +1574,7 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
 
@@ -1893,23 +1582,21 @@
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
       (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
-  GenerateNameCheck(Handle<String>(name), &name_miss);
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             rax,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                  rbx, rdx, rdi, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  rax, holder, rbx, rdx, rdi, name, &miss);
 
   Register receiver = rbx;
   Register index = rdi;
@@ -1922,19 +1609,19 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      scratch,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1944,22 +1631,21 @@
 
   __ bind(&miss);
   // Restore function name in rcx.
-  __ Move(rcx, Handle<String>(name));
+  __ Move(rcx, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1969,31 +1655,28 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
       (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
-  GenerateNameCheck(Handle<String>(name), &name_miss);
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             rax,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                  rbx, rdx, rdi, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  rax, holder, rbx, rdx, rdi, name, &miss);
 
   Register receiver = rax;
   Register index = rdi;
@@ -2007,45 +1690,43 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
     __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
     __ ret((argc + 1) * kPointerSize);
   }
-
   __ bind(&miss);
   // Restore function name in rcx.
-  __ Move(rcx, Handle<String>(name));
+  __ Move(rcx, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -2054,25 +1735,23 @@
   //  -- rsp[(argc + 1) * 8] : receiver
   // -----------------------------------
 
-  const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  const int argc = arguments().immediate();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
     __ JumpIfSmi(rdx, &miss);
-
-    CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2087,12 +1766,12 @@
   // Convert the smi code to uint16.
   __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, rax);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, rax);
+  generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
@@ -2105,29 +1784,30 @@
 
   __ bind(&miss);
   // rcx: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // TODO(872): implement this.
-  return heap()->undefined_value();
+  return Handle<Code>::null();
 }
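
CompileMathFloorCall makes the new bail-out convention explicit: where the
custom call compilers used to return heap()->undefined_value() to mean "no
specialized stub", they now return a null handle. The caller-side pattern,
grounded in CompileCallConstant and CompileCallGlobal below:

    Handle<Code> code = CompileCustomCall(object, holder, cell,
                                          function, name);
    if (!code.is_null()) return code;  // use the specialized stub
    // ...a null handle falls through to the regular compiler.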
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -2136,28 +1816,25 @@
   //  -- rsp[(argc + 1) * 8] : receiver
   // -----------------------------------
 
-  const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  const int argc = arguments().immediate();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
     __ JumpIfSmi(rdx, &miss);
-
-    CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
-
   // Load the (only) argument into rax.
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
 
@@ -2222,34 +1899,32 @@
 
   __ bind(&miss);
   // rcx: function name.
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? TryGetCode(function) : TryGetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   ASSERT(optimization.is_simple_api_call());
   // Bail out if the object is a global object, as we don't want to
   // repatch it to the global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
-
-  GenerateNameCheck(Handle<String>(name), &miss_before_stack_reserved);
+  GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -2267,32 +1942,30 @@
   __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), rdx, holder,
-                  rbx, rax, rdi, name, depth, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                  name, depth, &miss);
 
   // Move the return address on top of the stack.
   __ movq(rax, Operand(rsp, 3 * kPointerSize));
   __ movq(Operand(rsp, 0 * kPointerSize), rax);
 
-  MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiCall(masm(), optimization, argc);
 
   __ bind(&miss);
   __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   // rcx                 : function name
@@ -2305,17 +1978,15 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -2331,14 +2002,13 @@
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
 
   Counters* counters = isolate()->counters();
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(counters->call_const(), 1);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), rdx, holder,
-                      rbx, rax, rdi, name, &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
+                      rdi, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2349,28 +2019,25 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         // Check that the object is a two-byte string or a symbol.
         __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                        rbx, rdx, rdi, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            rax, holder, rbx, rdx, rdi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(rdx, &fast);
@@ -2380,18 +2047,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                        rbx, rdx, rdi, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            rax, holder, rbx, rdx, rdi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || function->shared()->strict_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
@@ -2402,14 +2069,15 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                        rbx, rdx, rdi, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            rax, holder, rbx, rdx, rdi, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
-    }
-
-    default:
-      UNREACHABLE();
   }
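
Each rewritten case now states the positive condition first. The rule they all
apply, restated as a hedged sketch:

    // A primitive receiver (string, number, boolean) may be passed unboxed
    // only to builtins and strict-mode functions; non-strict user functions
    // expect ToObject(receiver), which these stubs do not emit, so they miss.
    bool CanPassUnboxedReceiver(JSFunction* function) {
      return function->IsBuiltin() || function->shared()->strict_mode();
    }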
 
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
@@ -2420,17 +2088,16 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(function);
+  return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -2441,8 +2108,7 @@
   // rsp[(argc + 1) * 8] : argument 0 = receiver
   // -----------------------------------
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
@@ -2454,17 +2120,8 @@
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
   CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         rdx,
-                                         rbx,
-                                         rdi,
-                                         rax,
-                                         &miss);
-  if (result->IsFailure()) return result;
+  compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
+                   &miss);
 
   // Restore receiver.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2491,19 +2148,19 @@
 
   // Handle load cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -2515,23 +2172,17 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
-  GenerateNameCheck(Handle<String>(name), &miss);
+  GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy.
@@ -2560,11 +2211,10 @@
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1);
-  MaybeObject* maybe_result = TryGenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return TryGetCode(NORMAL, name);
+  return GetCode(NORMAL, name);
 }
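
In CompileCallGlobal above, the bail-out sentinel changes as well: the raw-pointer version returned undefined to mean "no specialized stub, fall through", while the handlified CompileCustomCall signals the same thing with a null handle. A small sketch of that convention, again with stand-in types rather than V8's classes:

#include <cassert>

struct Code { int id; };

// Stand-in handle; a default-constructed (null) one signals "bail out".
template <typename T>
struct Handle {
  T* location = nullptr;
  bool is_null() const { return location == nullptr; }
};

// Hypothetical custom-call generator: a null handle means no specialized
// stub applies and the caller should fall through to the regular path.
Handle<Code> CompileCustomCall(bool has_fast_path, Code* stub) {
  return has_fast_path ? Handle<Code>{stub} : Handle<Code>{};
}

Handle<Code> CompileCallGlobal(bool has_fast_path, Code* stub, Code* generic) {
  Handle<Code> code = CompileCustomCall(has_fast_path, stub);
  if (!code.is_null()) return code;  // the custom stub handled the call
  return Handle<Code>{generic};      // regular compilation path
}

int main() {
  Code stub = {1}, generic = {2};
  assert(CompileCallGlobal(true, &stub, &generic).location == &stub);
  assert(CompileCallGlobal(false, &stub, &generic).location == &generic);
}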
 
 
@@ -2909,29 +2559,24 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
-  MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
-                                             rdi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, callback,
+                       name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(CALLBACKS, name);
+  return GetCode(CALLBACKS, name);
 }
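
The deleted miss.Unuse() calls here (and in the keyed variant further down) existed because an assembler label that has been branched to but never bound trips an assertion when destroyed; when the old Try* path bailed out with a failure before reaching __ bind(&miss), the label had to be released explicitly. The handlified GenerateLoadCallback has no early exit, so control always reaches the bind. A toy model of that label invariant, not V8's real Label class:

#include <cassert>

// Toy model of the invariant: a used-but-unbound label is a bug.
class Label {
 public:
  ~Label() { assert(!used_ || bound_); }
  void Use() { used_ = true; }     // some branch targets this label
  void Bind() { bound_ = true; }   // the label's position was emitted
  void Unuse() { used_ = false; }  // forget pending branches
 private:
  bool used_ = false;
  bool bound_ = false;
};

int main() {
  {  // old early-exit path: must Unuse() before the label dies
    Label miss;
    miss.Use();
    bool allocation_failed = true;
    if (allocation_failed) miss.Unuse();  // otherwise ~Label would assert
  }
  {  // new straight-line path: control always reaches the bind
    Label miss;
    miss.Use();
    miss.Bind();
  }
}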
 
 
@@ -2955,37 +2600,27 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          rax,
-                          rcx,
-                          rdx,
-                          rbx,
-                          rdi,
-                          name,
-                          &miss);
-
+  GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi,
+                          name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -3068,39 +2703,32 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- rax     : key
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
   Label miss;
-
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
-                                             rcx, rdi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback,
+                       name, &miss);
   __ bind(&miss);
-
   __ DecrementCounter(counters->keyed_load_callback(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(CALLBACKS, name);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -3120,7 +2748,7 @@
   __ IncrementCounter(counters->keyed_load_constant_function(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
@@ -3134,41 +2762,33 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          rdx,
-                          rax,
-                          rcx,
-                          rbx,
-                          rdi,
-                          name,
-                          &miss);
+  GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi,
+                          name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_interceptor(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return TryGetCode(INTERCEPTOR, name);
+  return GetCode(INTERCEPTOR, name);
 }
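
Both keyed-load stubs are compiled for one specific property name, which is why they open with __ Cmp(rax, name) / __ j(not_equal, &miss): if the incoming key is no longer the name the stub was specialized for, execution falls back to the miss handler; the handlified signature merely lets Cmp take the Handle<String> directly instead of wrapping a raw String* at each use. An illustrative model of that guard, with made-up names:

#include <cassert>
#include <string>

// Illustrative model of a name-specialized keyed-load stub's fast path.
struct LoadResult {
  bool hit;  // false == "jump to the miss handler"
  int value;
};

LoadResult KeyedLoadSpecialized(const std::string& key,
                                const std::string& specialized_name,
                                int property_value) {
  if (key != specialized_name) return {false, 0};  // &miss
  return {true, property_value};                   // fast path
}

int main() {
  assert(KeyedLoadSpecialized("length", "length", 3).hit);
  assert(!KeyedLoadSpecialized("name", "length", 3).hit);
}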
 
 
@@ -3300,7 +2920,8 @@
 
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // ----------- S t a t e -------------
   //  -- rax : argc
   //  -- rdi : constructor
@@ -3343,12 +2964,8 @@
   // rbx: initial map
   __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
   __ shl(rcx, Immediate(kPointerSizeLog2));
-  __ AllocateInNewSpace(rcx,
-                        rdx,
-                        rcx,
-                        no_reg,
-                        &generic_stub_call,
-                        NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
+                        &generic_stub_call, NO_ALLOCATION_FLAGS);
 
   // The JSObject is allocated; now initialize the fields and add the heap tag.
   // rbx: initial map
@@ -3373,7 +2990,7 @@
   // r9: first in-object property of the JSObject
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       // Check if the argument assigned to the property is actually passed.
@@ -3421,10 +3038,8 @@
   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Code* code =
-      isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
-  Handle<Code> generic_construct_stub(code);
-  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode();
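
The last hunk swaps the raw builtin lookup, builtin(Builtins::kJSConstructStubGeneric) wrapped in a fresh handle, for the generated accessor that yields a Handle<Code> directly. The motivation behind all of these signature changes is the same: a handle indirects through a slot the garbage collector may rewrite, so it stays valid across a moving collection where a cached raw pointer would dangle. A toy illustration of that distinction, not V8's actual handle mechanics:

#include <cassert>

struct Code { int id; };

// Toy moving collector: one root slot it is allowed to rewrite.
struct Heap {
  Code* slot;
  void MoveObject(Code* to) {
    *to = *slot;
    slot->id = -1;  // poison the old location, as a real collector might
    slot = to;
  }
};

int main() {
  Code from_space = {42}, to_space = {0};
  Heap heap = {&from_space};

  Code** handle = &heap.slot;  // "handle": indirection through the root
  Code* raw = heap.slot;       // raw pointer snapshot

  heap.MoveObject(&to_space);  // simulate a moving collection

  assert((*handle)->id == 42);  // the handle followed the move
  assert(raw->id == -1);        // the raw pointer went stale
}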