Upgrade to V8 3.3

Merge V8 at 3.3.10.39

Simple merge required updates to makefiles only.

Bug: 5688872
Change-Id: I14703f418235f5ce6013b9b3e2e502407a9f6dfd
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index f7453d1..e787fed 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
@@ -45,7 +45,7 @@
 namespace internal {
 
 // -----------------------------------------------------------------------------
-// Operand and MemOperand
+// Operand and MemOperand.
 
 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
   rm_ = no_reg;
@@ -80,7 +80,7 @@
 
 
 // -----------------------------------------------------------------------------
-// RelocInfo
+// RelocInfo.
 
 void RelocInfo::apply(intptr_t delta) {
   // On MIPS we do not use pc relative addressing, so we don't need to patch the
@@ -95,24 +95,8 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
-                              || rmode_ == EMBEDDED_OBJECT
-                              || rmode_ == EXTERNAL_REFERENCE);
-  // Read the address of the word containing the target_address in an
-  // instruction stream.
-  // The only architecture-independent user of this function is the serializer.
-  // The serializer uses it to find out how many raw bytes of instruction to
-  // output before the next target.
-  // For an instructions like LUI/ORI where the target bits are mixed into the
-  // instruction bits, the size of the target will be zero, indicating that the
-  // serializer should not step forward in memory after a target is resolved
-  // and written.  In this case the target_address_address function should
-  // return the end of the instructions to be patched, allowing the
-  // deserializer to deserialize the instructions as raw bytes and put them in
-  // place, ready to be patched with the target. In our case, that is the
-  // address of the instruction that follows LUI/ORI instruction pair.
-  return reinterpret_cast<Address>(
-    pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
 }
 
 
@@ -144,12 +128,9 @@
   // Provide a "natural pointer" to the embedded object,
   // which can be de-referenced during heap iteration.
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  // TODO(mips): Commenting out, to simplify arch-independent changes.
-  // GC won't work like this, but this commit is for asm/disasm/sim.
-  // reconstructed_obj_ptr_ =
-  //   reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
-  // return &reconstructed_obj_ptr_;
-  return NULL;
+  reconstructed_obj_ptr_ =
+      reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+  return &reconstructed_obj_ptr_;
 }
 
 
@@ -161,11 +142,8 @@
 
 Address* RelocInfo::target_reference_address() {
   ASSERT(rmode_ == EXTERNAL_REFERENCE);
-  // TODO(mips): Commenting out, to simplify arch-independent changes.
-  // GC won't work like this, but this commit is for asm/disasm/sim.
-  // reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
-  // return &reconstructed_adr_ptr_;
-  return NULL;
+  reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+  return &reconstructed_adr_ptr_;
 }
 
 
@@ -251,26 +229,23 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    // RelocInfo is needed when pointer must be updated/serialized, such as
-    // UpdatingVisitor in mark-compact.cc or Serializer in serialize.cc.
-    // It is ignored by visitors that do not need it.
-    // Commenting out, to simplify arch-independednt changes.
-    // GC won't work like this, but this commit is for asm/disasm/sim.
-    // visitor->VisitPointer(target_object_address(), this);
+    Object** p = target_object_address();
+    Object* orig = *p;
+    visitor->VisitPointer(p);
+    if (*p != orig) {
+      set_target_object(*p);
+    }
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    // RelocInfo is needed when external-references must be serialized by
-    // Serializer Visitor in serialize.cc. It is ignored by visitors that
-    // do not need it.
-    // Commenting out, to simplify arch-independednt changes.
-    // Serializer won't work like this, but this commit is for asm/disasm/sim.
-    // visitor->VisitExternalReference(target_reference_address(), this);
+    visitor->VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
                IsPatchedReturnSequence()) ||
-             (RelocInfo::IsDebugBreakSlot(mode) &&
+              (RelocInfo::IsDebugBreakSlot(mode) &&
                IsPatchedDebugBreakSlotSequence())) &&
              Isolate::Current()->debug()->has_break_points()) {
     visitor->VisitDebugTarget(this);
@@ -287,7 +262,9 @@
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitPointer(heap, target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -296,7 +273,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
@@ -305,7 +282,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Assembler
+// Assembler.
 
 
 void Assembler::CheckBuffer() {
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 7d00da1..2e10904 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #include "v8.h"
@@ -43,13 +43,17 @@
 namespace v8 {
 namespace internal {
 
-CpuFeatures::CpuFeatures()
-    : supported_(0),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
-}
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
 
-void CpuFeatures::Probe(bool portable) {
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
+#ifdef DEBUG
+  initialized_ = true;
+#endif
   // If the compiler is allowed to use fpu then we can use fpu too in our
   // code generation.
 #if !defined(__mips__)
@@ -58,7 +62,7 @@
       supported_ |= 1u << FPU;
   }
 #else
-  if (portable && Serializer::enabled()) {
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
@@ -69,8 +73,6 @@
     supported_ |= 1u << FPU;
     found_by_runtime_probing_ |= 1u << FPU;
   }
-
-  if (!portable) found_by_runtime_probing_ = 0;
 #endif
 }
 
@@ -235,12 +237,10 @@
 static const int kMinimalBufferSize = 4 * KB;
 
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       positions_recorder_(this),
-      allow_peephole_optimization_(false) {
-  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
-  allow_peephole_optimization_ = FLAG_peephole_optimization;
+      emit_debug_code_(FLAG_debug_code) {
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -276,13 +276,17 @@
   no_trampoline_pool_before_ = 0;
   trampoline_pool_blocked_nesting_ = 0;
   next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
+  internal_trampoline_exception_ = false;
+  last_bound_pos_ = 0;
+
+  ast_id_for_reloc_info_ = kNoASTId;
 }
 
 
 Assembler::~Assembler() {
   if (own_buffer_) {
     if (isolate()->assembler_spare_buffer() == NULL &&
-      buffer_size_ == kMinimalBufferSize) {
+        buffer_size_ == kMinimalBufferSize) {
       isolate()->set_assembler_spare_buffer(buffer_);
     } else {
       DeleteArray(buffer_);
@@ -316,13 +320,82 @@
 }
 
 
-Register Assembler::GetRt(Instr instr) {
+Register Assembler::GetRtReg(Instr instr) {
   Register rt;
-  rt.code_ = (instr & kRtMask) >> kRtShift;
+  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
   return rt;
 }
 
 
+Register Assembler::GetRsReg(Instr instr) {
+  Register rs;
+  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+  return rs;
+}
+
+
+Register Assembler::GetRdReg(Instr instr) {
+  Register rd;
+  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+  return rd;
+}
+
+
+uint32_t Assembler::GetRt(Instr instr) {
+  return (instr & kRtFieldMask) >> kRtShift;
+}
+
+
+uint32_t Assembler::GetRtField(Instr instr) {
+  return instr & kRtFieldMask;
+}
+
+
+uint32_t Assembler::GetRs(Instr instr) {
+  return (instr & kRsFieldMask) >> kRsShift;
+}
+
+
+uint32_t Assembler::GetRsField(Instr instr) {
+  return instr & kRsFieldMask;
+}
+
+
+uint32_t Assembler::GetRd(Instr instr) {
+  return (instr & kRdFieldMask) >> kRdShift;
+}
+
+
+uint32_t Assembler::GetRdField(Instr instr) {
+  return instr & kRdFieldMask;
+}
+
+
+uint32_t Assembler::GetSa(Instr instr) {
+  return (instr & kSaFieldMask) >> kSaShift;
+}
+
+
+uint32_t Assembler::GetSaField(Instr instr) {
+  return instr & kSaFieldMask;
+}
+
+
+uint32_t Assembler::GetOpcodeField(Instr instr) {
+  return instr & kOpcodeMask;
+}
+
+
+uint32_t Assembler::GetImmediate16(Instr instr) {
+  return instr & kImm16Mask;
+}
+
+
+uint32_t Assembler::GetLabelConst(Instr instr) {
+  return instr & ~kImm16Mask;
+}
+
+
 bool Assembler::IsPop(Instr instr) {
   return (instr & ~kRtMask) == kPopRegPattern;
 }
@@ -374,10 +447,10 @@
 
 
 bool Assembler::IsBranch(Instr instr) {
-  uint32_t opcode   = ((instr & kOpcodeMask));
-  uint32_t rt_field = ((instr & kRtFieldMask));
-  uint32_t rs_field = ((instr & kRsFieldMask));
-  uint32_t label_constant = (instr & ~kImm16Mask);
+  uint32_t opcode   = GetOpcodeField(instr);
+  uint32_t rt_field = GetRtField(instr);
+  uint32_t rs_field = GetRsField(instr);
+  uint32_t label_constant = GetLabelConst(instr);
   // Checks if the instruction is a branch.
   return opcode == BEQ ||
       opcode == BNE ||
@@ -386,7 +459,7 @@
       opcode == BEQL ||
       opcode == BNEL ||
       opcode == BLEZL ||
-      opcode == BGTZL||
+      opcode == BGTZL ||
       (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                             rt_field == BLTZAL || rt_field == BGEZAL)) ||
       (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
@@ -394,13 +467,23 @@
 }
 
 
+bool Assembler::IsBeq(Instr instr) {
+  return GetOpcodeField(instr) == BEQ;
+}
+
+
+bool Assembler::IsBne(Instr instr) {
+  return GetOpcodeField(instr) == BNE;
+}
+
+
 bool Assembler::IsNop(Instr instr, unsigned int type) {
   // See Assembler::nop(type).
   ASSERT(type < 32);
-  uint32_t opcode = ((instr & kOpcodeMask));
-  uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
-  uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
-  uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+  uint32_t opcode = GetOpcodeField(instr);
+  uint32_t rt = GetRt(instr);
+  uint32_t rs = GetRs(instr);
+  uint32_t sa = GetSa(instr);
 
   // nop(type) == sll(zero_reg, zero_reg, type);
   // Technically all these values will be 0 but
@@ -465,6 +548,11 @@
 }
 
 
+bool Assembler::IsAndImmediate(Instr instr) {
+  return GetOpcodeField(instr) == ANDI;
+}
+
+
 int Assembler::target_at(int32_t pos) {
   Instr instr = instr_at(pos);
   if ((instr & ~kImm16Mask) == 0) {
@@ -546,6 +634,10 @@
     if (dist > kMaxBranchOffset) {
       do {
         int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
+        if (kInvalidSlotPos == trampoline_pos) {
+          // Internal error.
+          return;
+        }
         ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
         target_at_put(fixup_pos, trampoline_pos);
         fixup_pos = trampoline_pos;
@@ -554,6 +646,10 @@
     } else if (dist < -kMaxBranchOffset) {
       do {
         int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
+        if (kInvalidSlotPos == trampoline_pos) {
+          // Internal error.
+          return;
+        }
         ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
         target_at_put(fixup_pos, trampoline_pos);
         fixup_pos = trampoline_pos;
@@ -652,7 +748,7 @@
                                  FPURegister fd,
                                  SecondaryField func) {
   ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
       | (fd.code() << kFdShift) | func;
   emit(instr);
@@ -666,7 +762,7 @@
                                  FPURegister fd,
                                  SecondaryField func) {
   ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr = opcode | fmt | (rt.code() << kRtShift)
       | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
   emit(instr);
@@ -679,7 +775,7 @@
                                  FPUControlRegister fs,
                                  SecondaryField func) {
   ASSERT(fs.is_valid() && rt.is_valid());
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr =
       opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
   emit(instr);
@@ -714,7 +810,7 @@
                                   FPURegister ft,
                                   int32_t j) {
   ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
       | (j & kImm16Mask);
   emit(instr);
@@ -760,22 +856,27 @@
 // Returns the next free trampoline entry from the next trampoline pool.
 int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
   int trampoline_count = trampolines_.length();
-  int32_t trampoline_entry = 0;
+  int32_t trampoline_entry = kInvalidSlotPos;
   ASSERT(trampoline_count > 0);
 
-  if (next_pool) {
-    for (int i = 0; i < trampoline_count; i++) {
-      if (trampolines_[i].start() > pos) {
-       trampoline_entry = trampolines_[i].take_slot();
-       break;
+  if (!internal_trampoline_exception_) {
+    if (next_pool) {
+      for (int i = 0; i < trampoline_count; i++) {
+        if (trampolines_[i].start() > pos) {
+          trampoline_entry = trampolines_[i].take_slot();
+          break;
+        }
+      }
+    } else {  // Caller needs a trampoline entry from the previous pool.
+      for (int i = trampoline_count - 1; i >= 0; i--) {
+        if (trampolines_[i].end() < pos) {
+          trampoline_entry = trampolines_[i].take_slot();
+          break;
+        }
       }
     }
-  } else {  // Caller needs a trampoline entry from the previous pool.
-    for (int i = trampoline_count-1; i >= 0; i--) {
-      if (trampolines_[i].end() < pos) {
-       trampoline_entry = trampolines_[i].take_slot();
-       break;
-      }
+    if (kInvalidSlotPos == trampoline_entry) {
+      internal_trampoline_exception_ = true;
     }
   }
   return trampoline_entry;
@@ -792,6 +893,10 @@
     if (dist > kMaxBranchOffset) {
       do {
         int32_t trampoline_pos = get_trampoline_entry(target_pos);
+        if (kInvalidSlotPos == trampoline_pos) {
+          // Internal error.
+          return 0;
+        }
         ASSERT((trampoline_pos - target_pos) > 0);
         ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
         target_at_put(trampoline_pos, target_pos);
@@ -801,6 +906,10 @@
     } else if (dist < -kMaxBranchOffset) {
       do {
         int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
+        if (kInvalidSlotPos == trampoline_pos) {
+          // Internal error.
+          return 0;
+        }
         ASSERT((target_pos - trampoline_pos) > 0);
         ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
         target_at_put(trampoline_pos, target_pos);
@@ -979,157 +1088,6 @@
 
 void Assembler::addiu(Register rd, Register rs, int32_t j) {
   GenInstrImmediate(ADDIU, rs, rd, j);
-
-  // Eliminate pattern: push(r), pop().
-  //   addiu(sp, sp, Operand(-kPointerSize));
-  //   sw(src, MemOperand(sp, 0);
-  //   addiu(sp, sp, Operand(kPointerSize));
-  // Both instructions can be eliminated.
-  if (can_peephole_optimize(3) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern &&
-      (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) {
-    pc_ -= 3 * kInstrSize;
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
-    }
-  }
-
-  // Eliminate pattern: push(ry), pop(rx).
-  //   addiu(sp, sp, -kPointerSize)
-  //   sw(ry, MemOperand(sp, 0)
-  //   lw(rx, MemOperand(sp, 0)
-  //   addiu(sp, sp, kPointerSize);
-  // Both instructions can be eliminated if ry = rx.
-  // If ry != rx, a register copy from ry to rx is inserted
-  // after eliminating the push and the pop instructions.
-  if (can_peephole_optimize(4)) {
-    Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize);
-    Instr push_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr pop_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(push_instr) &&
-        IsPop(pop_instr) && pre_push_sp_set == kPushInstruction &&
-        post_pop_sp_set == kPopInstruction) {
-      if ((pop_instr & kRtMask) != (push_instr & kRtMask)) {
-        // For consecutive push and pop on different registers,
-        // we delete both the push & pop and insert a register move.
-        // push ry, pop rx --> mov rx, ry.
-        Register reg_pushed, reg_popped;
-        reg_pushed = GetRt(push_instr);
-        reg_popped = GetRt(pop_instr);
-        pc_ -= 4 * kInstrSize;
-        // Insert a mov instruction, which is better than a pair of push & pop.
-        or_(reg_popped, reg_pushed, zero_reg);
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
-                 pc_offset());
-        }
-      } else {
-        // For consecutive push and pop on the same register,
-        // both the push and the pop can be deleted.
-        pc_ -= 4 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
-        }
-      }
-    }
-  }
-
-  if (can_peephole_optimize(5)) {
-    Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize);
-    Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize);
-    Instr lw_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(mem_write_instr) &&
-        pre_push_sp_set == kPushInstruction &&
-        IsPop(mem_read_instr) &&
-        post_pop_sp_set == kPopInstruction) {
-      if ((IsLwRegFpOffset(lw_instr) ||
-        IsLwRegFpNegOffset(lw_instr))) {
-        if ((mem_write_instr & kRtMask) ==
-              (mem_read_instr & kRtMask)) {
-          // Pattern: push & pop from/to same register,
-          // with a fp+offset lw in between.
-          //
-          // The following:
-          // addiu sp, sp, -4
-          // sw rx, [sp, #0]!
-          // lw rz, [fp, #-24]
-          // lw rx, [sp, 0],
-          // addiu sp, sp, 4
-          //
-          // Becomes:
-          // if(rx == rz)
-          //   delete all
-          // else
-          //   lw rz, [fp, #-24]
-
-          if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) {
-            pc_ -= 5 * kInstrSize;
-          } else {
-            pc_ -= 5 * kInstrSize;
-            // Reinsert back the lw rz.
-            emit(lw_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
-          }
-        } else {
-          // Pattern: push & pop from/to different registers
-          // with a fp + offset lw in between.
-          //
-          // The following:
-          // addiu sp, sp ,-4
-          // sw rx, [sp, 0]
-          // lw rz, [fp, #-24]
-          // lw ry, [sp, 0]
-          // addiu sp, sp, 4
-          //
-          // Becomes:
-          // if(ry == rz)
-          //   mov ry, rx;
-          // else if(rx != rz)
-          //   lw rz, [fp, #-24]
-          //   mov ry, rx
-          // else if((ry != rz) || (rx == rz)) becomes:
-          //   mov ry, rx
-          //   lw rz, [fp, #-24]
-
-          Register reg_pushed, reg_popped;
-          if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
-            reg_pushed = GetRt(mem_write_instr);
-            reg_popped = GetRt(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-          } else if ((mem_write_instr & kRtMask)
-                                != (lw_instr & kRtMask)) {
-            reg_pushed = GetRt(mem_write_instr);
-            reg_popped = GetRt(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            emit(lw_instr);
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-          } else if (((mem_read_instr & kRtMask)
-                                     != (lw_instr & kRtMask)) ||
-                    ((mem_write_instr & kRtMask)
-                                     == (lw_instr & kRtMask)) ) {
-            reg_pushed = GetRt(mem_write_instr);
-            reg_popped = GetRt(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-            emit(lw_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
-          }
-        }
-      }
-    }
-  }
 }
 
 
@@ -1317,54 +1275,6 @@
     LoadRegPlusOffsetToAt(rs);
     GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
   }
-
-  if (can_peephole_optimize(2)) {
-    Instr sw_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr lw_instr = instr_at(pc_ - 1 * kInstrSize);
-
-    if ((IsSwRegFpOffset(sw_instr) &&
-         IsLwRegFpOffset(lw_instr)) ||
-       (IsSwRegFpNegOffset(sw_instr) &&
-         IsLwRegFpNegOffset(lw_instr))) {
-      if ((lw_instr & kLwSwInstrArgumentMask) ==
-            (sw_instr & kLwSwInstrArgumentMask)) {
-        // Pattern: Lw/sw same fp+offset, same register.
-        //
-        // The following:
-        // sw rx, [fp, #-12]
-        // lw rx, [fp, #-12]
-        //
-        // Becomes:
-        // sw rx, [fp, #-12]
-
-        pc_ -= 1 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset());
-        }
-      } else if ((lw_instr & kLwSwOffsetMask) ==
-                 (sw_instr & kLwSwOffsetMask)) {
-        // Pattern: Lw/sw same fp+offset, different register.
-        //
-        // The following:
-        // sw rx, [fp, #-12]
-        // lw ry, [fp, #-12]
-        //
-        // Becomes:
-        // sw rx, [fp, #-12]
-        // mov ry, rx
-
-        Register reg_stored, reg_loaded;
-        reg_stored = GetRt(sw_instr);
-        reg_loaded = GetRt(lw_instr);
-        pc_ -= 1 * kInstrSize;
-        // Insert a mov instruction, which is better than lw.
-        or_(reg_loaded, reg_stored, zero_reg);  // Move instruction.
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x sw/lw (fp + same offset), diff reg \n", pc_offset());
-        }
-      }
-    }
-  }
 }
 
 
@@ -1405,23 +1315,6 @@
     LoadRegPlusOffsetToAt(rs);
     GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
   }
-
-  // Eliminate pattern: pop(), push(r).
-  //     addiu sp, sp, Operand(kPointerSize);
-  //     addiu sp, sp, Operand(-kPointerSize);
-  // ->  sw r, MemOpernad(sp, 0);
-  if (can_peephole_optimize(3) &&
-     // Pattern.
-     instr_at(pc_ - 1 * kInstrSize) ==
-       (kPushRegPattern | (rd.code() << kRtShift)) &&
-     instr_at(pc_ - 2 * kInstrSize) == kPushInstruction &&
-     instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) {
-    pc_ -= 3 * kInstrSize;
-    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
-    }
-  }
 }
 
 
@@ -1545,14 +1438,14 @@
 
 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
   Register rt;
-  rt.code_ = (cc & 0x0003) << 2 | 1;
+  rt.code_ = (cc & 0x0007) << 2 | 1;
   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
 }
 
 
 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
   Register rt;
-  rt.code_ = (cc & 0x0003) << 2 | 0;
+  rt.code_ = (cc & 0x0007) << 2 | 0;
   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
 }
 
@@ -1816,7 +1709,7 @@
 // Conditions.
 void Assembler::c(FPUCondition cond, SecondaryField fmt,
     FPURegister fs, FPURegister ft, uint16_t cc) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   ASSERT((fmt & ~(31 << kRsShift)) == 0);
   Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1827,7 +1720,7 @@
 
 void Assembler::fcmp(FPURegister src1, const double src2,
       FPUCondition cond) {
-  ASSERT(isolate()->cpu_features()->IsSupported(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(src2 == 0.0);
   mtc1(zero_reg, f14);
   cvt_d_w(f14, f14);
@@ -1836,7 +1729,7 @@
 
 
 void Assembler::bc1f(int16_t offset, uint16_t cc) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
   emit(instr);
@@ -1844,7 +1737,7 @@
 
 
 void Assembler::bc1t(int16_t offset, uint16_t cc) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
   emit(instr);
@@ -1949,7 +1842,14 @@
       return;
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
-    reloc_info_writer.Write(&rinfo);
+    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+      ASSERT(ast_id_for_reloc_info_ != kNoASTId);
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
+      ast_id_for_reloc_info_ = kNoASTId;
+      reloc_info_writer.Write(&reloc_info_with_ast_id);
+    } else {
+      reloc_info_writer.Write(&rinfo);
+    }
   }
 }
 
@@ -2017,72 +1917,39 @@
 Address Assembler::target_address_at(Address pc) {
   Instr instr1 = instr_at(pc);
   Instr instr2 = instr_at(pc + kInstrSize);
-  // Check we have 2 instructions generated by li.
-  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
-         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
-                            (instr2 & kOpcodeMask) == ORI ||
-                            (instr2 & kOpcodeMask) == LUI)));
-  // Interpret these 2 instructions.
-  if (instr1 == nopInstr) {
-    if ((instr2 & kOpcodeMask) == ADDI) {
-      return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
-    } else if ((instr2 & kOpcodeMask) == ORI) {
-      return reinterpret_cast<Address>(instr2 & kImm16Mask);
-    } else if ((instr2 & kOpcodeMask) == LUI) {
-      return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
-    }
-  } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
-    // 32 bit value.
+  // Interpret 2 instructions generated by li: lui/ori.
+  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
+    // Assemble the 32-bit value.
     return reinterpret_cast<Address>(
-        (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
   }
 
-  // We should never get here.
+  // We should never get here; force a bad address if we do.
   UNREACHABLE();
   return (Address)0x0;
 }
 
 
 void Assembler::set_target_address_at(Address pc, Address target) {
-  // On MIPS we need to patch the code to generate.
+  // On MIPS we patch the address into the lui/ori instruction pair.
 
-  // First check we have a li.
+  // First check we have an li (lui/ori pair).
   Instr instr2 = instr_at(pc + kInstrSize);
 #ifdef DEBUG
   Instr instr1 = instr_at(pc);
 
   // Check we have indeed the result from a li with MustUseReg true.
-  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
-        ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
-                           (instr2 & kOpcodeMask)== ORI ||
-                           (instr2 & kOpcodeMask)== LUI)));
+  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
 #endif
 
-  uint32_t rt_code = (instr2 & kRtFieldMask);
+  uint32_t rt_code = GetRtField(instr2);
   uint32_t* p = reinterpret_cast<uint32_t*>(pc);
   uint32_t itarget = reinterpret_cast<uint32_t>(target);
 
-  if (is_int16(itarget)) {
-    // nop.
-    // addiu rt zero_reg j.
-    *p = nopInstr;
-    *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask);
-  } else if (!(itarget & kHiMask)) {
-    // nop.
-    // ori rt zero_reg j.
-    *p = nopInstr;
-    *(p+1) = ORI | rt_code | (itarget & kImm16Mask);
-  } else if (!(itarget & kImm16Mask)) {
-    // nop.
-    // lui rt (kHiMask & itarget) >> kLuiShift.
-    *p = nopInstr;
-    *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
-  } else {
-    // lui rt (kHiMask & itarget) >> kLuiShift.
-    // ori rt rt, (kImm16Mask & itarget).
-    *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
-    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
-  }
+  // lui rt, high-16.
+  // ori rt, rt, low-16.
+  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
 
   CPU::FlushICache(pc, 2 * sizeof(int32_t));
 }
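
Note: the lui/ori patching scheme that the new target_address_at and
set_target_address_at bodies rely on reduces to simple 16-bit splits. A
minimal stand-alone sketch of just that arithmetic (plain C++, not V8 code;
the function names are illustrative):

#include <stdint.h>

// Split a 32-bit target into the lui/ori immediates patched in above.
void SplitTarget(uint32_t itarget, uint32_t* lui_imm, uint32_t* ori_imm) {
  *lui_imm = itarget >> 16;      // Immediate for: lui rt, high-16.
  *ori_imm = itarget & 0xFFFF;   // Immediate for: ori rt, rt, low-16.
}

// Mirror of target_address_at: reassemble the 32-bit value from the pair.
uint32_t JoinTarget(uint32_t lui_imm, uint32_t ori_imm) {
  return (lui_imm << 16) | ori_imm;
}
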
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 5a6e271..a167393 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
@@ -67,12 +67,13 @@
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister
+// Implementation of Register and FPURegister.
 
 // Core register.
 struct Register {
   static const int kNumRegisters = v8::internal::kNumRegisters;
-  static const int kNumAllocatableRegisters = 14;  // v0 through t7
+  static const int kNumAllocatableRegisters = 14;  // v0 through t7.
+  static const int kSizeInBytes = 4;
 
   static int ToAllocationIndex(Register reg) {
     return reg.code() - 2;  // zero_reg and 'at' are skipped.
@@ -267,9 +268,6 @@
 // FPU (coprocessor 1) control registers.
 // Currently only FCSR (#31) is implemented.
 struct FPUControlRegister {
-  static const int kFCSRRegister = 31;
-  static const int kInvalidFPUControlRegister = -1;
-
   bool is_valid() const { return code_ == kFCSRRegister; }
   bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
   int code() const {
@@ -288,7 +286,7 @@
   int code_;
 };
 
-const FPUControlRegister no_fpucreg = { -1 };
+const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
 const FPUControlRegister FCSR = { kFCSRRegister };
 
 
@@ -318,7 +316,7 @@
 
  private:
   Register rm_;
-  int32_t imm32_;  // Valid if rm_ == no_reg
+  int32_t imm32_;  // Valid if rm_ == no_reg.
   RelocInfo::Mode rmode_;
 
   friend class Assembler;
@@ -342,58 +340,98 @@
 
 // CpuFeatures keeps track of which features are supported by the target CPU.
 // Supported features must be enabled by a Scope before use.
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  void Probe(bool portable);
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == FPU && !FLAG_enable_fpu) return false;
     return (supported_ & (1u << f)) != 0;
   }
 
+
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (1u << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+    return (enabled & (1u << f)) != 0;
   }
+#endif
 
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
-      ASSERT(cpu_features_->IsSupported(f));
+    explicit Scope(CpuFeature f) {
+      unsigned mask = 1u << f;
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-             (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= 1u << f;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
-     }
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
+    }
    private:
-    unsigned old_enabled_;
-    CpuFeatures* cpu_features_;
     Isolate* isolate_;
+    unsigned old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
 
+  class TryForceFeatureScope BASE_EMBEDDED {
+   public:
+    explicit TryForceFeatureScope(CpuFeature f)
+        : old_supported_(CpuFeatures::supported_) {
+      if (CanForce()) {
+        CpuFeatures::supported_ |= (1u << f);
+      }
+    }
+
+    ~TryForceFeatureScope() {
+      if (CanForce()) {
+        CpuFeatures::supported_ = old_supported_;
+      }
+    }
+
+   private:
+    static bool CanForce() {
+      // It's only safe to temporarily force support of CPU features
+      // when there's only a single isolate, which is guaranteed when
+      // the serializer is enabled.
+      return Serializer::enabled();
+    }
+
+    const unsigned old_supported_;
+  };
+
  private:
-  CpuFeatures();
-
-  unsigned supported_;
-  unsigned enabled_;
-  unsigned found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static unsigned supported_;
+  static unsigned found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
@@ -414,7 +452,7 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
@@ -439,10 +477,10 @@
   //
   // Note: The same Label can be used for forward and backward branches
   // but it may be bound only once.
-  void bind(Label* L);  // binds an unbound label L to the current code position
+  void bind(Label* L);  // Binds an unbound label L to current code position.
 
-  // Returns the branch offset to the given label from the current code position
-  // Links the label to the current position if it is still unbound
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still unbound.
   // Manages the jump elimination optimization if the second parameter is true.
   int32_t branch_offset(Label* L, bool jump_elimination_allowed);
   int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
@@ -541,14 +579,14 @@
     FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
   };
 
-  // type == 0 is the default non-marking type.
+  // Type == 0 is the default non-marking type.
   void nop(unsigned int type = 0) {
     ASSERT(type < 32);
     sll(zero_reg, zero_reg, type, true);
   }
 
 
-  //------- Branch and jump  instructions --------
+  // ------- Branch and jump instructions --------
   // We don't use likely variant of instructions.
   void b(int16_t offset);
   void b(Label* L) { b(branch_offset(L, false)>>2); }
@@ -571,7 +609,7 @@
   }
 
   // Never use the int16_t b(l)cond version with a branch offset
-  // instead of using the Label* version. See Twiki for infos.
+  // instead of using the Label* version.
 
   // Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
   void j(int32_t target);
@@ -761,6 +799,10 @@
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot();
 
+  // Record the AST id of the CallIC being compiled, so that it can be placed
+  // in the relocation information.
+  void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+
   // Record a comment relocation entry that can be used by a disassembler.
   // Use --code-comments to enable.
   void RecordComment(const char* msg);
@@ -774,12 +816,6 @@
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
-  bool can_peephole_optimize(int instructions) {
-    if (!allow_peephole_optimization_) return false;
-    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
-    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
-  }
-
   // Postpone the generation of the trampoline pool for the specified number of
   // instructions.
   void BlockTrampolinePoolFor(int instructions);
@@ -804,6 +840,8 @@
 
   // Check if an instruction is a branch of some kind.
   static bool IsBranch(Instr instr);
+  static bool IsBeq(Instr instr);
+  static bool IsBne(Instr instr);
 
   static bool IsNop(Instr instr, unsigned int type);
   static bool IsPop(Instr instr);
@@ -813,7 +851,21 @@
   static bool IsLwRegFpNegOffset(Instr instr);
   static bool IsSwRegFpNegOffset(Instr instr);
 
-  static Register GetRt(Instr instr);
+  static Register GetRtReg(Instr instr);
+  static Register GetRsReg(Instr instr);
+  static Register GetRdReg(Instr instr);
+
+  static uint32_t GetRt(Instr instr);
+  static uint32_t GetRtField(Instr instr);
+  static uint32_t GetRs(Instr instr);
+  static uint32_t GetRsField(Instr instr);
+  static uint32_t GetRd(Instr instr);
+  static uint32_t GetRdField(Instr instr);
+  static uint32_t GetSa(Instr instr);
+  static uint32_t GetSaField(Instr instr);
+  static uint32_t GetOpcodeField(Instr instr);
+  static uint32_t GetImmediate16(Instr instr);
+  static uint32_t GetLabelConst(Instr instr);
 
   static int32_t GetBranchOffset(Instr instr);
   static bool IsLw(Instr instr);
@@ -825,9 +877,16 @@
   static bool IsAddImmediate(Instr instr);
   static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
 
+  static bool IsAndImmediate(Instr instr);
+
   void CheckTrampolinePool(bool force_emit = false);
 
  protected:
+  // Relocation for a type-recording IC has the AST id added to it.  This
+  // member variable is a way to pass the information from the call site to
+  // the relocation info.
+  unsigned ast_id_for_reloc_info_;
+
   bool emit_debug_code() const { return emit_debug_code_; }
 
   int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -861,6 +920,10 @@
     return trampoline_pool_blocked_nesting_ > 0;
   }
 
+  bool has_exception() const {
+    return internal_trampoline_exception_;
+  }
+
  private:
   // Code buffer:
   // The buffer into which code and relocation info are generated.
@@ -1005,10 +1068,18 @@
       return end_;
     }
     int take_slot() {
-      int trampoline_slot = next_slot_;
-      ASSERT(free_slot_count_ > 0);
-      free_slot_count_--;
-      next_slot_ += 2 * kInstrSize;
+      int trampoline_slot = kInvalidSlotPos;
+      if (free_slot_count_ <= 0) {
+        // We have run out of space on trampolines.
+        // Make sure we fail in debug mode, so we become aware of each case
+        // when this happens.
+        ASSERT(0);
+        // Internal exception will be caught.
+      } else {
+        trampoline_slot = next_slot_;
+        free_slot_count_--;
+        next_slot_ += 2 * kInstrSize;
+      }
       return trampoline_slot;
     }
     int take_label() {
@@ -1038,8 +1109,10 @@
   static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
   static const int kMaxDistBetweenPools =
       kMaxBranchOffset - 2 * kTrampolineSize;
+  static const int kInvalidSlotPos = -1;
 
   List<Trampoline> trampolines_;
+  bool internal_trampoline_exception_;
 
   friend class RegExpMacroAssemblerMIPS;
   friend class RelocInfo;
@@ -1047,7 +1120,6 @@
   friend class BlockTrampolinePoolScope;
 
   PositionsRecorder positions_recorder_;
-  bool allow_peephole_optimization_;
   bool emit_debug_code_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
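
Note: the new static CpuFeatures with its Scope class follows a plain RAII
enable/restore pattern: OR the feature bit into the enabled mask on entry,
restore the previous mask on exit. A hedged stand-alone sketch of that
pattern (hypothetical types, not the V8 classes):

// Hypothetical mask holder standing in for the per-isolate feature state.
struct FeatureState { unsigned enabled; };

class ScopedFeature {
 public:
  ScopedFeature(FeatureState* state, unsigned feature_bit)
      : state_(state), old_enabled_(state->enabled) {
    state_->enabled = old_enabled_ | feature_bit;  // Enable for this scope.
  }
  ~ScopedFeature() { state_->enabled = old_enabled_; }  // Restore on exit.

 private:
  FeatureState* state_;
  unsigned old_enabled_;
};
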
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index b4bab8e..e22259d 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
@@ -47,97 +47,1577 @@
 void Builtins::Generate_Adaptor(MacroAssembler* masm,
                                 CFunctionId id,
                                 BuiltinExtraArguments extra_args) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments excluding receiver
+  //  -- a1                 : called function (only guaranteed when
+  //  --                      extra_args requires it)
+  //  -- cp                 : context
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+
+  // Insert extra arguments.
+  int num_extra_args = 0;
+  if (extra_args == NEEDS_CALLED_FUNCTION) {
+    num_extra_args = 1;
+    __ push(a1);
+  } else {
+    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+  }
+
+  // JumpToExternalReference expects a0 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  __ Addu(a0, a0, Operand(num_extra_args + 1));
+  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(result,
+         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ lw(result,
+         MemOperand(result,
+                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements and
+// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
+// below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity > 0);
+  // Load the initial map from the array function.
+  __ lw(scratch1, FieldMemOperand(array_function,
+                                  JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  __ AllocateInNewSpace(size,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(scratch3, zero_reg);
+  __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ Addu(scratch1, result, Operand(JSArray::kSize));
+  __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array (untagged)
+  // scratch2: start of next object
+  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ sw(scratch3, MemOperand(scratch1));
+  __ Addu(scratch1, scratch1, kPointerSize);
+  __ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ sw(scratch3, MemOperand(scratch1));
+  __ Addu(scratch1, scratch1, kPointerSize);
+
+  // Fill the FixedArray with the hole value.
+  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < initial_capacity; i++) {
+    __ sw(scratch3, MemOperand(scratch1));
+    __ Addu(scratch1, scratch1, kPointerSize);
+  }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array_storage and elements_array_end
+// (see below for when that is not the case). If the parameter fill_with_hole
+// is true, the allocated elements backing store is filled with the hole
+// values; otherwise it is left uninitialized. When the backing store is
+// filled, the register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array_storage,
+                            Register elements_array_end,
+                            Register scratch1,
+                            Register scratch2,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ lw(elements_array_storage,
+         FieldMemOperand(array_function,
+                         JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
+
+  // If an empty array is requested, allocate a small elements array anyway;
+  // this keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize +
+             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch1,
+                        gc_required,
+                        TAG_OBJECT);
+  __ Branch(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested number of elements.
+  __ bind(&not_empty);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ li(elements_array_end,
+        (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
+  __ sra(scratch1, array_size, kSmiTagSize);
+  __ Addu(elements_array_end, elements_array_end, scratch1);
+  __ AllocateInNewSpace(
+      elements_array_end,
+      result,
+      scratch1,
+      scratch2,
+      gc_required,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array_storage: initial map
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(elements_array_storage,
+         FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // array_size: size of array (smi)
+  __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
+  __ sw(elements_array_storage,
+         FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ And(elements_array_storage,
+          elements_array_storage,
+          Operand(~kHeapObjectTagMask));
+  // Initialize the fixed array and fill it with holes. FixedArray length is
+  // stored as a smi.
+  // result: JSObject
+  // elements_array_storage: elements array (untagged)
+  // array_size: size of array (smi)
+  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ sw(scratch1, MemOperand(elements_array_storage));
+  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+  // Length of the FixedArray is the number of pre-allocated elements if
+  // the actual JSArray has length 0 and the size of the JSArray for non-empty
+  // JSArrays. The length of a FixedArray is stored as a smi.
+  ASSERT(kSmiTag == 0);
+  __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+  __ movz(array_size, at, array_size);
+
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ sw(array_size, MemOperand(elements_array_storage));
+  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+  // Calculate elements array and elements array end.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // array_size: smi-tagged size of elements array
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(elements_array_end, elements_array_storage, elements_array_end);
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // elements_array_end: start of next object
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+    __ Branch(&entry);
+    __ bind(&loop);
+    __ sw(scratch1, MemOperand(elements_array_storage));
+    __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+    __ bind(&entry);
+    __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
+  }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+//   a0: argc
+//   a1: constructor (built-in Array function)
+//   ra: return address
+//   sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in a1 needs to be preserved for
+// entering the generic code. In both cases argc in a0 needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// construct call and normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label* call_generic_code) {
+  Counters* counters = masm->isolate()->counters();
+  Label argc_one_or_more, argc_two_or_more;
+
+  // Check for array construction with zero arguments or one.
+  __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       a1,
+                       a2,
+                       a3,
+                       t0,
+                       t1,
+                       JSArray::kPreallocatedArrayElements,
+                       call_generic_code);
+  __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
+  // Set up return value, remove receiver from stack and return.
+  __ mov(v0, a2);
+  __ Addu(sp, sp, Operand(kPointerSize));
+  __ Ret();
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+  __ bind(&argc_one_or_more);
+  __ Branch(&argc_two_or_more, ne, a0, Operand(1));
+
+  ASSERT(kSmiTag == 0);
+  __ lw(a2, MemOperand(sp));  // Get the argument from the stack.
+  __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
+  __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
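+  // A single mask tests both conditions: the tag bit must be clear for a smi
+  // and the sign bit must be clear for a non-negative value.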
+
+  // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+  ASSERT(kSmiTag == 0);
+  __ Branch(call_generic_code, ge, a2,
+            Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
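+  // The limit is shifted left by kSmiTagSize so that it is compared against
+  // the still smi-tagged argument in a2.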
+
+  // a0: argc
+  // a1: constructor
+  // a2: array_size (smi)
+  // sp[0]: argument
+  AllocateJSArray(masm,
+                  a1,
+                  a2,
+                  a3,
+                  t0,
+                  t1,
+                  t2,
+                  t3,
+                  true,
+                  call_generic_code);
+  __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
+
+  // Set up the return value, remove the receiver and argument from the stack
+  // and return.
+  __ mov(v0, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  __ sll(a2, a0, kSmiTagSize);  // Convert argc to a smi.
+
+  // a0: argc
+  // a1: constructor
+  // a2: array_size (smi)
+  // sp[0]: last argument
+  AllocateJSArray(masm,
+                  a1,
+                  a2,
+                  a3,
+                  t0,
+                  t1,
+                  t2,
+                  t3,
+                  false,
+                  call_generic_code);
+  __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
+
+  // Fill arguments as array elements. Copy from the top of the stack (last
+  // element) to the array backing store, filling it backwards. Note:
+  // elements_array_end points after the backing store.
+  // a0: argc
+  // a3: JSArray
+  // t0: elements_array storage start (untagged)
+  // t1: elements_array_end (untagged)
+  // sp[0]: last argument
+
+  Label loop, entry;
+  __ Branch(&entry);
+  __ bind(&loop);
+  __ pop(a2);
+  __ Addu(t1, t1, -kPointerSize);
+  __ sw(a2, MemOperand(t1));
+  __ bind(&entry);
+  __ Branch(&loop, lt, t0, Operand(t1));
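+  // The loop pops one argument per iteration, filling the backing store from
+  // its end (t1) down towards its start (t0).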
+
+  // Remove caller arguments and receiver from the stack, setup return value and
+  // return.
+  // a0: argc
+  // a3: JSArray
+  // sp[0]: receiver
+  __ Addu(sp, sp, Operand(kPointerSize));
+  __ mov(v0, a3);
+  __ Ret();
 }
 
 
 void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // The initial map for the builtin Array function should be a map.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function (1)",
+              t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t0);
+    __ Assert(eq, "Unexpected initial map for Array function (2)",
+              t0, Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code if the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
+
+  Handle<Code> array_code =
+      masm->isolate()->builtins()->ArrayCodeGeneric();
+  __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
 
 
 void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin and internal
+    // Array functions which always have a map.
+    // Initial map for the builtin Array function should be a map.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function (3)",
+              t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t0);
+    __ Assert(eq, "Unexpected initial map for Array function (4)",
+              t0, Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the Array function called as a constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
+
+  Handle<Code> generic_construct_stub =
+      masm->isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
 }
 
 
 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
+
+  Register function = a1;
+  if (FLAG_debug_code) {
+    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
+    __ Assert(eq, "Unexpected String function", function, Operand(a2));
+  }
+
+  // Load the first argument into a0 and get rid of the rest.
+  Label no_arguments;
+  __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
+  // The first argument is at sp[(argc - 1) * 4].
+  __ Subu(a0, a0, Operand(1));
+  __ sll(a0, a0, kPointerSizeLog2);
+  __ Addu(sp, a0, sp);
+  __ lw(a0, MemOperand(sp));
+  // sp now points to args[0]; drop args[0] and the receiver.
+  __ Drop(2);
+
+  Register argument = a2;
+  Label not_cached, argument_is_string;
+  NumberToStringStub::GenerateLookupNumberStringCache(
+      masm,
+      a0,        // Input.
+      argument,  // Result.
+      a3,        // Scratch.
+      t0,        // Scratch.
+      t1,        // Scratch.
+      false,     // Is it a Smi?
+      &not_cached);
+  __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
+  __ bind(&argument_is_string);
+
+  // ----------- S t a t e -------------
+  //  -- a2     : argument converted to string
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label gc_required;
+  __ AllocateInNewSpace(JSValue::kSize,
+                        v0,  // Result.
+                        a3,  // Scratch.
+                        t0,  // Scratch.
+                        &gc_required,
+                        TAG_OBJECT);
+
+  // Initialize the string object.
+  Register map = a3;
+  __ LoadGlobalFunctionInitialMap(function, map, t0);
+  if (FLAG_debug_code) {
+    __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
+    __ Assert(eq, "Unexpected string wrapper instance size",
+        t0, Operand(JSValue::kSize >> kPointerSizeLog2));
+    __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+    __ Assert(eq, "Unexpected unused properties of string wrapper",
+        t0, Operand(zero_reg));
+  }
+  __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  // Ensure the object is fully initialized.
+  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
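+  // Map, properties, elements and value make up the four words of a JSValue,
+  // all of which were stored above, so the object is fully initialized.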
+
+  __ Ret();
+
+  // The argument was not found in the number to string cache. Check
+  // if it's a string already before calling the conversion builtin.
+  Label convert_argument;
+  __ bind(&not_cached);
+  __ JumpIfSmi(a0, &convert_argument);
+
+  // Is it a String?
+  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  ASSERT(kNotStringTag != 0);
+  __ And(t0, a3, Operand(kIsNotStringMask));
+  __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
+  __ mov(argument, a0);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+  __ Branch(&argument_is_string);
+
+  // Invoke the conversion builtin and put the result into a2.
+  __ bind(&convert_argument);
+  __ push(function);  // Preserve the function.
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+  __ EnterInternalFrame();
+  __ push(v0);
+  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  __ LeaveInternalFrame();
+  __ pop(function);
+  __ mov(argument, v0);
+  __ Branch(&argument_is_string);
+
+  // Load the empty string into a2, remove the receiver from the
+  // stack, and jump back to the case where the argument is a string.
+  __ bind(&no_arguments);
+  __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+  __ Drop(1);
+  __ Branch(&argument_is_string);
+
+  // At this point the argument is already a string. Call runtime to
+  // create a string wrapper.
+  __ bind(&gc_required);
+  __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
+  __ EnterInternalFrame();
+  __ push(argument);
+  __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  __ LeaveInternalFrame();
+  __ Ret();
 }
 
 
 void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Label non_function_call;
+  // Check that the function is not a smi.
+  __ And(t0, a1, Operand(kSmiTagMask));
+  __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+  // Check that the function is a JSFunction.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // Jump to the function-specific construct stub.
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
+  __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(Operand(t9));
+
+  // a0: number of arguments
+  // a1: called object
+  __ bind(&non_function_call);
+  // CALL_NON_FUNCTION expects the non-function constructor as receiver
+  // (instead of the original receiver from the call site). The receiver is
+  // stack element argc.
+  // Set expected number of arguments to zero (not changing a0).
+  __ mov(a2, zero_reg);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool count_constructions) {
+  // Should never count constructions for api objects.
+  ASSERT(!is_api_function || !count_constructions);
+
+  Isolate* isolate = masm->isolate();
+
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  // Enter a construct frame.
+  __ EnterConstructFrame();
+
+  // Preserve the two incoming parameters on the stack.
+  __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
+  __ MultiPushReversed(a0.bit() | a1.bit());
+
+  // Use t7 to hold undefined, which is used in several places below.
+  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+  Label rt_call, allocated;
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  if (FLAG_inline_new) {
+    Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address(isolate);
+    __ li(a2, Operand(debug_step_in_fp));
+    __ lw(a2, MemOperand(a2));
+    __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+#endif
+
+    // Load the initial map and verify that it is in fact a map.
+    // a1: constructor function
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t4);
+    __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+    // Check that the constructor is not constructing a JSFunction (see
+    // comments in Runtime_NewObject in runtime.cc), in which case the initial
+    // map's instance type would be JS_FUNCTION_TYPE.
+    // a1: constructor function
+    // a2: initial map
+    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+    __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+      MemOperand constructor_count =
+         FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+      __ lbu(t0, constructor_count);
+      __ Subu(t0, t0, Operand(1));
+      __ sb(t0, constructor_count);
+      __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+      __ Push(a1, a2);
+
+      __ push(a1);  // Constructor.
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(a2);
+      __ pop(a1);
+
+      __ bind(&allocate);
+    }
+
+    // Now allocate the JSObject on the heap.
+    // a1: constructor function
+    // a2: initial map
+    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+    __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+    // The JSObject is now allocated; initialize the fields. The map is set to
+    // the initial map, and the properties and elements are set to the empty
+    // fixed array.
+    // a1: constructor function
+    // a2: initial map
+    // a3: object size
+    // t4: JSObject (not tagged)
+    __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+    __ mov(t5, t4);
+    __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+    __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+    __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+    __ Addu(t5, t5, Operand(3*kPointerSize));
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+    // Fill all the in-object properties with appropriate filler.
+    // a1: constructor function
+    // a2: initial map
+    // a3: object size (in words)
+    // t4: JSObject (not tagged)
+    // t5: First in-object property of JSObject (not tagged)
+    __ sll(t0, a3, kPointerSizeLog2);
+    __ addu(t6, t4, t0);   // End of object.
+    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+    { Label loop, entry;
+      if (count_constructions) {
+        // To allow for truncation.
+        __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+      } else {
+        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+      }
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ sw(t7, MemOperand(t5, 0));
+      __ addiu(t5, t5, kPointerSize);
+      __ bind(&entry);
+      __ Branch(&loop, Uless, t5, Operand(t6));
+    }
+
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+    // Check if a non-empty properties array is needed. Continue with the
+    // allocated object if not; fall through to the runtime call if it is.
+    // a1: constructor function
+    // t4: JSObject
+    // t5: start of next object (not tagged)
+    __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+    // The instance sizes field contains both the pre-allocated property
+    // fields and the in-object properties.
+    __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+    __ And(t6,
+           a0,
+           Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+    __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
+    __ Addu(a3, a3, Operand(t0));
+    __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+    __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
+    __ subu(a3, a3, t0);
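+    // Each byte of the instance sizes word holds one count; the mask/shift
+    // pairs above extract the pre-allocated property field count and the
+    // in-object property count.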
+
+    // Done if no extra properties are to be allocated.
+    __ Branch(&allocated, eq, a3, Operand(zero_reg));
+    __ Assert(greater_equal, "Property allocation count failed.",
+        a3, Operand(zero_reg));
+
+    // Scale the number of elements by the pointer size and add the FixedArray
+    // header size, continuing the start-of-next-object calculation from above.
+    // a1: constructor
+    // a3: number of elements in properties array
+    // t4: JSObject
+    // t5: start of next object
+    __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+    __ AllocateInNewSpace(
+        a0,
+        t5,
+        t6,
+        a2,
+        &undo_allocation,
+        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+    // Initialize the FixedArray.
+    // a1: constructor
+    // a3: number of elements in properties array (un-tagged)
+    // t4: JSObject
+    // t5: start of next object
+    __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+    __ mov(a2, t5);
+    __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+    __ sll(a0, a3, kSmiTagSize);
+    __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+    __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+    // Initialize the fields to undefined.
+    // a1: constructor
+    // a2: First element of FixedArray (not tagged)
+    // a3: number of elements in properties array
+    // t4: JSObject
+    // t5: FixedArray (not tagged)
+    __ sll(t3, a3, kPointerSizeLog2);
+    __ addu(t6, a2, t3);  // End of object.
+    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+    { Label loop, entry;
+      if (count_constructions) {
+        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+      } else if (FLAG_debug_code) {
+        __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+        __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+      }
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ sw(t7, MemOperand(a2));
+      __ addiu(a2, a2, kPointerSize);
+      __ bind(&entry);
+      __ Branch(&loop, less, a2, Operand(t6));
+    }
+
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject.
+    // a1: constructor function
+    // t4: JSObject
+    // t5: FixedArray (not tagged)
+    __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
+    __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+    // Continue with JSObject being successfully allocated.
+    // a1: constructor function
+    // t4: JSObject
+    __ jmp(&allocated);
+
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated object's unused properties.
+    // t4: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(t4, t5);
+  }
+
+  __ bind(&rt_call);
+  // Allocate the new receiver object using the runtime call.
+  // a1: constructor function
+  __ push(a1);  // Argument for Runtime_NewObject.
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ mov(t4, v0);
+
+  // Receiver for constructor call allocated.
+  // t4: JSObject
+  __ bind(&allocated);
+  __ push(t4);
+
+  // Reload the function from the stack and push it together with the
+  // allocated receiver.
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ lw(a1, MemOperand(sp, kPointerSize));
+  __ MultiPushReversed(a1.bit() | t4.bit());
+
+  // Reload the number of arguments from the stack.
+  // a1: constructor function
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+
+  // Set up a pointer to the last argument.
+  __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // Set up the number of arguments for the function call below.
+  __ srl(a0, a3, kSmiTagSize);
+
+  // Copy arguments and receiver to the expression stack.
+  // a0: number of arguments
+  // a1: constructor function
+  // a2: address of last argument (caller sp)
+  // a3: number of arguments (smi-tagged)
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  Label loop, entry;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a2, Operand(t0));
+  __ lw(t1, MemOperand(t0));
+  __ push(t1);
+  __ bind(&entry);
+  __ Addu(a3, a3, Operand(-2));
+  __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
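+  // a3 is a smi-tagged count, so subtracting 2 per iteration decrements the
+  // untagged count by one (kSmiTagSize == 1).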
+
+  // Call the function.
+  // a0: number of arguments
+  // a1: constructor function
+  if (is_api_function) {
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    Handle<Code> code =
+        masm->isolate()->builtins()->HandleApiCallConstruct();
+    ParameterCount expected(0);
+    __ InvokeCode(code, expected, expected,
+                  RelocInfo::CODE_TARGET, CALL_FUNCTION);
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION);
+  }
+
+  // Pop the function from the stack.
+  // v0: result
+  // sp[0]: constructor function
+  // sp[1]: receiver
+  // sp[2]: constructor function
+  // sp[3]: number of arguments (smi-tagged)
+  __ Pop();
+
+  // Restore context from the frame.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  // v0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ And(t0, v0, Operand(kSmiTagMask));
+  __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ GetObjectType(v0, a3, a3);
+  __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ lw(v0, MemOperand(sp));
+
+  // Remove receiver from the stack, remove caller arguments, and
+  // return.
+  __ bind(&exit);
+  // v0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+  __ LeaveConstructFrame();
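+  // a1 holds the smi-tagged argument count; shifting it left by
+  // kPointerSizeLog2 - 1 (i.e. kPointerSizeLog2 - kSmiTagSize) converts it to
+  // a byte count, and the extra kPointerSize drops the receiver.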
+  __ sll(t0, a1, kPointerSizeLog2 - 1);
+  __ Addu(sp, sp, t0);
+  __ Addu(sp, sp, kPointerSize);
+  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+  __ Ret();
 }
 
 
 void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSConstructStubHelper(masm, false, true);
 }
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSConstructStubHelper(masm, false, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from JSEntryStub::GenerateBody.
+
+  // ----------- S t a t e -------------
+  //  -- a0: code entry
+  //  -- a1: function
+  //  -- a2: receiver_pointer
+  //  -- a3: argc
+  //  -- s0: argv
+  // -----------------------------------
+
+  // Clear the context before we push it when entering the JS frame.
+  __ mov(cp, zero_reg);
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Set up the context from the function argument.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Set up the roots register.
+  ExternalReference roots_address =
+      ExternalReference::roots_address(masm->isolate());
+  __ li(s6, Operand(roots_address));
+
+  // Push the function and the receiver onto the stack.
+  __ Push(a1, a2);
+
+  // Copy arguments to the stack in a loop.
+  // a3: argc
+  // s0: argv, i.e. points to the first argument
+  Label loop, entry;
+  __ sll(t0, a3, kPointerSizeLog2);
+  __ addu(t2, s0, t0);
+  __ b(&entry);
+  __ nop();   // Branch delay slot nop.
+  // t2 points past last arg.
+  __ bind(&loop);
+  __ lw(t0, MemOperand(s0));  // Read next parameter.
+  __ addiu(s0, s0, kPointerSize);
+  __ lw(t0, MemOperand(t0));  // Dereference handle.
+  __ push(t0);  // Push parameter.
+  __ bind(&entry);
+  __ Branch(&loop, ne, s0, Operand(t2));
+
+  // Initialize all JavaScript callee-saved registers, since they will be seen
+  // by the garbage collector as part of handlers.
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ mov(s1, t0);
+  __ mov(s2, t0);
+  __ mov(s3, t0);
+  __ mov(s4, t0);
+  __ mov(s5, t0);
+  // s6 holds the root address. Do not clobber.
+  // s7 is cp. Do not initialize.
+
+  // Invoke the code and pass argc as a0.
+  __ mov(a0, a3);
+  if (is_construct) {
+    __ Call(masm->isolate()->builtins()->JSConstructCall(),
+            RelocInfo::CODE_TARGET);
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION);
+  }
+
+  __ LeaveInternalFrame();
+
+  __ Jump(ra);
 }
 
 
 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSEntryTrampolineHelper(masm, false);
 }
 
 
 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSEntryTrampolineHelper(masm, true);
 }
 
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(a1);
+  // Push call kind information.
+  __ push(t1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(a1);
+  // Call the runtime function.
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  // Calculate the entry point.
+  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+
+  // Restore call kind information.
+  __ pop(t1);
+  // Restore saved function.
+  __ pop(a1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(t9);
 }
 
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(a1);
+  // Push call kind information.
+  __ push(t1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(a1);
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+  // Calculate the entry point.
+  __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Restore call kind information.
+  __ pop(t1);
+  // Restore saved function.
+  __ pop(a1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(t9);
 }
 
 
+// These functions are called from C++ but cannot be used in live code.
 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // 1. Make sure we have at least one argument.
+  // a0: actual number of arguments
+  { Label done;
+    __ Branch(&done, ne, a0, Operand(zero_reg));
+    __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+    __ push(t2);
+    __ Addu(a0, a0, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call (passed as receiver) from the stack, check
+  //    if it is a function.
+  // a0: actual number of arguments
+  Label non_function;
+  __ sll(at, a0, kPointerSizeLog2);
+  __ addu(at, sp, at);
+  __ lw(a1, MemOperand(at));
+  __ And(at, a1, Operand(kSmiTagMask));
+  __ Branch(&non_function, eq, at, Operand(zero_reg));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // 3a. Patch the first argument if necessary when calling a function.
+  // a0: actual number of arguments
+  // a1: function
+  Label shift_arguments;
+  { Label convert_to_object, use_global_receiver, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+    // Do not transform the receiver for strict mode functions.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                                 kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+    // Do not transform the receiver for natives (compiler hints already in
+    // a3).
+    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kES5Native +
+                                 kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+    // Compute the receiver in non-strict mode.
+    // Load the first argument into a2: a2 = sp[(n_args << 2) - kPointerSize].
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(a2, sp, at);
+    __ lw(a2, MemOperand(a2, -kPointerSize));
+    // a0: actual number of arguments
+    // a1: function
+    // a2: first argument
+    __ JumpIfSmi(a2, &convert_to_object, t2);
+
+    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+    __ LoadRoot(a3, Heap::kNullValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+
+    STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    __ GetObjectType(a2, a3, a3);
+    __ Branch(&shift_arguments, ge, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+    __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
+    __ push(a0);
+
+    __ push(a2);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(a2, v0);
+
+    __ pop(a0);
+    __ sra(a0, a0, kSmiTagSize);  // Un-tag.
+    __ LeaveInternalFrame();
+    // Restore the function to a1.
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(at, sp, at);
+    __ lw(a1, MemOperand(at));
+    __ Branch(&patch_receiver);
+
+    // Use the global receiver object from the called function as the
+    // receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalIndex =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+    __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+
+    __ bind(&patch_receiver);
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(a3, sp, at);
+    __ sw(a2, MemOperand(a3, -kPointerSize));
+
+    __ Branch(&shift_arguments);
+  }
+
+  // 3b. Patch the first argument when calling a non-function.  The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  // a0: actual number of arguments
+  // a1: function
+  __ bind(&non_function);
+  // Restore the function in case it has been modified.
+  __ sll(at, a0, kPointerSizeLog2);
+  __ addu(a2, sp, at);
+  __ sw(a1, MemOperand(a2, -kPointerSize));
+  // Clear a1 to indicate a non-function being called.
+  __ mov(a1, zero_reg);
+
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // a0: actual number of arguments
+  // a1: function
+  __ bind(&shift_arguments);
+  { Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(a2, sp, at);
+
+    __ bind(&loop);
+    __ lw(at, MemOperand(a2, -kPointerSize));
+    __ sw(at, MemOperand(a2));
+    __ Subu(a2, a2, Operand(kPointerSize));
+    __ Branch(&loop, ne, a2, Operand(sp));
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ Subu(a0, a0, Operand(1));
+    __ Pop();
+  }
+
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  // a0: actual number of arguments
+  // a1: function
+  { Label function;
+    __ Branch(&function, ne, a1, Operand(zero_reg));
+    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+    __ mov(a2, zero_reg);
+    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+    __ SetCallKind(t1, CALL_AS_METHOD);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+    __ bind(&function);
+  }
+
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing.  If so, jump
+  //     (tail-call) to the code in register a3 without checking arguments.
+  // a0: actual number of arguments
+  // a1: function
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2,
+         FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ sra(a2, a2, kSmiTagSize);
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  // Check formal and actual parameter counts.
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+
+  ParameterCount expected(0);
+  __ InvokeCode(a3, expected, expected, JUMP_FUNCTION);
 }
 
 
 void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  const int kIndexOffset    = -5 * kPointerSize;
+  const int kLimitOffset    = -4 * kPointerSize;
+  const int kArgsOffset     =  2 * kPointerSize;
+  const int kRecvOffset     =  3 * kPointerSize;
+  const int kFunctionOffset =  4 * kPointerSize;
+
+  __ EnterInternalFrame();
+
+  __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
+  __ push(a0);
+  __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
+  __ push(a0);
+  // Returns (in v0) the number of arguments to copy to the stack, as a smi.
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+  // Check the stack for overflow. We are not trying to catch interruptions
+  // (e.g. debug break and preemption) here, so the "real stack limit" is
+  // checked.
+  Label okay;
+  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+  // Make a2 the space we have left. The stack might already be overflowed
+  // here, which will cause a2 to become negative.
+  __ subu(a2, sp, a2);
+  // Check if the arguments will overflow the stack.
+  __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
+  __ Branch(&okay, gt, a2, Operand(t0));  // Signed comparison.
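+  // v0 holds the smi-tagged argument count, so the shift by
+  // kPointerSizeLog2 - kSmiTagSize above computes the number of bytes the
+  // arguments will occupy on the stack.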
+
+  // Out of stack space.
+  __ lw(a1, MemOperand(fp, kFunctionOffset));
+  __ push(a1);
+  __ push(v0);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  // End of stack check.
+
+  // Push current limit and index.
+  __ bind(&okay);
+  __ push(v0);  // Limit.
+  __ mov(a1, zero_reg);  // Initial index.
+  __ push(a1);
+
+  // Change context eagerly to get the right global object if necessary.
+  __ lw(a0, MemOperand(fp, kFunctionOffset));
+  __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
+  // Load the shared function info while the function is still in a0.
+  __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+
+  // Compute the receiver.
+  Label call_to_object, use_global_receiver, push_receiver;
+  __ lw(a0, MemOperand(fp, kRecvOffset));
+
+  // Do not transform the receiver for strict mode functions.
+  __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                               kSmiTagSize)));
+  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+  // Do not transform the receiver for natives (compiler hints already in a2).
+  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kES5Native +
+                               kSmiTagSize)));
+  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+  // Compute the receiver in non-strict mode.
+  __ And(t0, a0, Operand(kSmiTagMask));
+  __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
+  __ LoadRoot(a1, Heap::kNullValueRootIndex);
+  __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+
+  // Check if the receiver is already a JavaScript object.
+  // a0: receiver
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&push_receiver, ge, a1, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Convert the receiver to a regular object.
+  // a0: receiver
+  __ bind(&call_to_object);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
+  __ Branch(&push_receiver);
+
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+  __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+
+  // Push the receiver.
+  // a0: receiver
+  __ bind(&push_receiver);
+  __ push(a0);
+
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ lw(a0, MemOperand(fp, kIndexOffset));
+  __ Branch(&entry);
+
+  // Load the current argument from the arguments array and push it to the
+  // stack.
+  // a0: current argument index
+  __ bind(&loop);
+  __ lw(a1, MemOperand(fp, kArgsOffset));
+  __ push(a1);
+  __ push(a0);
+
+  // Call the runtime to access the property in the arguments array.
+  __ CallRuntime(Runtime::kGetProperty, 2);
+  __ push(v0);
+
+  // Advance the index past the argument that was just copied.
+  __ lw(a0, MemOperand(fp, kIndexOffset));
+  __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+  __ sw(a0, MemOperand(fp, kIndexOffset));
+
+  // Test if the copy loop has finished copying all the elements from the
+  // arguments object.
+  __ bind(&entry);
+  __ lw(a1, MemOperand(fp, kLimitOffset));
+  __ Branch(&loop, ne, a0, Operand(a1));
+  // Invoke the function.
+  ParameterCount actual(a0);
+  __ sra(a0, a0, kSmiTagSize);
+  __ lw(a1, MemOperand(fp, kFunctionOffset));
+  __ InvokeFunction(a1, actual, CALL_FUNCTION);
+
+  // Tear down the internal frame and remove function, receiver and args.
+  __ LeaveInternalFrame();
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ sll(a0, a0, kSmiTagSize);
+  __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
+  __ Addu(fp, sp, Operand(3 * kPointerSize));
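+  // fp now points at the saved fp slot; the smi-tagged argument count pushed
+  // above is at fp - 3 * kPointerSize, where LeaveArgumentsAdaptorFrame
+  // reloads it.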
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then remove the parameters and receiver from the caller's stack.
+  __ lw(a1, MemOperand(fp, -3 * kPointerSize));
+  __ mov(sp, fp);
+  __ MultiPop(fp.bit() | ra.bit());
+  __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(sp, sp, t0);
+  // Adjust for the receiver.
+  __ Addu(sp, sp, Operand(kPointerSize));
 }
 
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // State setup as expected by MacroAssembler::InvokePrologue.
+  // ----------- S t a t e -------------
+  //  -- a0: actual arguments count
+  //  -- a1: function (passed through to callee)
+  //  -- a2: expected arguments count
+  //  -- a3: callee code entry
+  //  -- t1: call kind information
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments;
+
+  Label enough, too_few;
+  __ Branch(&dont_adapt_arguments, eq,
+      a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  // We use Uless since the number of arguments should always be greater
+  // than 0.
+  __ Branch(&too_few, Uless, a0, Operand(a2));
+
+  {  // Enough parameters: actual >= expected.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate copy start address into a0 and copy end address into a2.
+    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address.
+    __ sll(a2, a2, kPointerSizeLog2);
+    __ subu(a2, a0, a2);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: copy end address
+    // a3: code entry to call
+
+    Label copy;
+    __ bind(&copy);
+    __ lw(t0, MemOperand(a0));
+    __ push(t0);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+    __ addiu(a0, a0, -kPointerSize);  // In delay slot.
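+    // The addiu above executes in the branch delay slot, so a0 steps down one
+    // slot per iteration whether or not the branch is taken.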
+
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // TODO(MIPS): Optimize these loops.
+
+    // Calculate the copy start address into a0; the copy end address is fp.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address. Also adjust for return address.
+    __ Addu(t1, fp, kPointerSize);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    // t1: copy end address
+    Label copy;
+    __ bind(&copy);
+    __ lw(t0, MemOperand(a0));  // Adjusted above for return addr and receiver.
+    __ push(t0);
+    __ Subu(a0, a0, kPointerSize);
+    __ Branch(&copy, ne, a0, Operand(t1));
+
+    // Fill the remaining expected arguments with undefined.
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ sll(t2, a2, kPointerSizeLog2);
+    __ Subu(a2, fp, Operand(t2));
+    __ Addu(a2, a2, Operand(-4 * kPointerSize));  // Adjust for frame.
+
+    Label fill;
+    __ bind(&fill);
+    __ push(t0);
+    __ Branch(&fill, ne, sp, Operand(a2));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+
+  __ Call(a3);
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Ret();
+
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ Jump(a3);
 }
 
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 6cc272c..c999994 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -31,7 +31,7 @@
 
 #include "bootstrapper.h"
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "regexp-macro-assembler.h"
 
 namespace v8 {
@@ -40,24 +40,233 @@
 
 #define __ ACCESS_MASM(masm)
 
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc,
+                                          bool never_nan_nan);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Register lhs,
+                                    Register rhs,
+                                    Label* rhs_not_nan,
+                                    Label* slow,
+                                    bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+                                           Register lhs,
+                                           Register rhs);
+
+
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+                                   Register scratch1, Register scratch2,
+                                   Label* not_a_heap_number) {
+  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
+}
+
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // The ToNumber stub takes one argument in a0.
+  Label check_heap_number, call_builtin;
+  __ JumpIfNotSmi(a0, &check_heap_number);
+  __ mov(v0, a0);
+  __ Ret();
+
+  __ bind(&check_heap_number);
+  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
+  __ mov(v0, a0);
+  __ Ret();
+
+  __ bind(&call_builtin);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Create a new closure from the given function info in new
+  // space. Set the context to the current context in cp.
+  Label gc;
+
+  // Pop the function info from the stack.
+  __ pop(a3);
+
+  // Attempt to allocate new JSFunction in new space.
+  __ AllocateInNewSpace(JSFunction::kSize,
+                        v0,
+                        a1,
+                        a2,
+                        &gc,
+                        TAG_OBJECT);
+
+  int map_index = strict_mode_ == kStrictMode
+      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+      : Context::FUNCTION_MAP_INDEX;
+
+  // Compute the function map in the current global context and set that
+  // as the map of the allocated object.
+  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  // Initialize the rest of the function. We don't have to update the
+  // write barrier because the allocated object is in new space.
+  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
+  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+
+  // Initialize the code pointer in the function to be the one
+  // found in the shared function info object.
+  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+  // Return result. The argument function info has been popped already.
+  __ Ret();
+
+  // Create a new closure through the slower runtime call.
+  __ bind(&gc);
+  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+  __ Push(cp, a3, t0);
+  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
 }
 
 
 void FastNewContextStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
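+  // FixedArray::SizeFor(length) below is kHeaderSize + length * kPointerSize,
+  // i.e. the context is allocated as a fixed array of 'length' slots.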
+
+  // Attempt to allocate the context in new space.
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        v0,
+                        a1,
+                        a2,
+                        &gc,
+                        TAG_OBJECT);
+
+  // Load the function from the stack.
+  __ lw(a3, MemOperand(sp, 0));
+
+  // Set up the object header.
+  __ LoadRoot(a2, Heap::kContextMapRootIndex);
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ li(a2, Operand(Smi::FromInt(length)));
+  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+  // Set up the fixed slots.
+  __ li(a1, Operand(Smi::FromInt(0)));
+  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+  __ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+  // Copy the global object from the surrounding context.
+  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+  // Initialize the rest of the slots to undefined.
+  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
+  }
+
+  // Remove the on-stack argument and return.
+  __ mov(cp, v0);
+  __ Pop();
+  __ Ret();
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
 }
 
 
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Stack layout on entry:
+  // [sp]: constant elements.
+  // [sp + kPointerSize]: literal index.
+  // [sp + (2 * kPointerSize)]: literals array.
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+  int size = JSArray::kSize + elements_size;
+
+  // Load the boilerplate object into a3 and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
+  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a3, t0);
+  __ lw(a3, MemOperand(t0));
+  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case, eq, a3, Operand(t1));
+
+  if (FLAG_debug_code) {
+    const char* message;
+    Heap::RootListIndex expected_map_index;
+    if (mode_ == CLONE_ELEMENTS) {
+      message = "Expected (writable) fixed array";
+      expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else {
+      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      message = "Expected copy-on-write fixed array";
+      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+    }
+    __ push(a3);
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
+    __ LoadRoot(at, expected_map_index);
+    __ Assert(eq, message, a3, Operand(at));
+    __ pop(a3);
+  }
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  // Return new object in v0.
+  __ AllocateInNewSpace(size,
+                        v0,
+                        a1,
+                        a2,
+                        &slow_case,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+      __ lw(a1, FieldMemOperand(a3, i));
+      __ sw(a1, FieldMemOperand(v0, i));
+    }
+  }
+
+  if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ Addu(a2, v0, Operand(JSArray::kSize));
+    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+  }
+
+  // Return and remove the on-stack parameters.
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
 }
 
 
@@ -107,72 +316,94 @@
 
 
 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+#ifndef BIG_ENDIAN_FLOATING_POINT
+  Register exponent = result1_;
+  Register mantissa = result2_;
+#else
+  Register exponent = result2_;
+  Register mantissa = result1_;
+#endif
+  Label not_special;
+  // Convert from Smi to integer.
+  __ sra(source_, source_, kSmiTagSize);
+  // Move sign bit from source to destination.  This works because the sign bit
+  // in the exponent word of the double has the same position and polarity as
+  // the 2's complement sign bit in a Smi.
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
+  // Subtract from 0 if source was negative.
+  __ subu(at, zero_reg, source_);
+  __ movn(source_, at, exponent);
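+  // at holds 0 - source_; movn writes it back only when the sign bits in
+  // exponent are non-zero, leaving source_ holding the absolute value.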
+
+  // The values -1, 0 and 1 are treated specially. Register source_ contains
+  // the absolute value: it is either equal to 1 (special case of -1 and 1),
+  // greater than 1 (not a special case) or less than 1 (special case of 0).
+  __ Branch(&not_special, gt, source_, Operand(1));
+
+  // For 1 or -1 we need to OR in the zero exponent (biased to 1023).
+  static const uint32_t exponent_word_for_1 =
+      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+  // Safe to use 'at' as dest reg here.
+  __ Or(at, exponent, Operand(exponent_word_for_1));
+  __ movn(exponent, at, source_);  // Write exp when source not 0.
+  // 1, 0 and -1 all have 0 for the second word.
+  __ mov(mantissa, zero_reg);
+  __ Ret();
+
+  __ bind(&not_special);
+  // Count leading zeros.
+  // Gets the wrong answer for 0, but we already checked for that case above.
+  __ clz(zeros_, source_);
+  // Compute exponent and or it into the exponent register.
+  // We use mantissa as a scratch register here.
+  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
+  __ subu(mantissa, mantissa, zeros_);
+  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
+  __ Or(exponent, exponent, mantissa);
+
+  // Shift up the source chopping the top bit off.
+  __ Addu(zeros_, zeros_, Operand(1));
+  // This wouldn't work for 1.0 or -1.0 as the shift amount would be 32,
+  // which sllv takes mod 32, i.e. a shift by 0.
+  __ sllv(source_, source_, zeros_);
+  // Compute lower part of fraction (last 12 bits).
+  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
+  // And the top (top 20 bits).
+  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
+  __ or_(exponent, exponent, source_);
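+  // For example, converting 5: clz gives 29, the biased exponent is
+  // 31 + 1023 - 29 = 1025, and the final words are 0x40140000:00000000,
+  // the IEEE-754 encoding of 5.0.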
+
+  __ Ret();
 }
 
 
-class FloatingPointHelper : public AllStatic {
- public:
-
-  enum Destination {
-    kFPURegisters,
-    kCoreRegisters
-  };
-
-
-  // Loads smis from a0 and a1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
-  // is floating point registers FPU must be supported. If core registers are
-  // requested when FPU is supported f12 and f14 will be scratched.
-  static void LoadSmis(MacroAssembler* masm,
-                       Destination destination,
-                       Register scratch1,
-                       Register scratch2);
-
-  // Loads objects from a0 and a1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
-  // is floating point registers FPU must be supported. If core registers are
-  // requested when FPU is supported f12 and f14 will still be scratched. If
-  // either a0 or a1 is not a number (not smi and not heap number object) the
-  // not_number label is jumped to with a0 and a1 intact.
-  static void LoadOperands(MacroAssembler* masm,
-                           FloatingPointHelper::Destination destination,
-                           Register heap_number_map,
-                           Register scratch1,
-                           Register scratch2,
-                           Label* not_number);
-  // Loads the number from object into dst as a 32-bit integer if possible. If
-  // the object is not a 32-bit integer control continues at the label
-  // not_int32. If FPU is supported double_scratch is used but not scratch2.
-  static void LoadNumberAsInteger(MacroAssembler* masm,
-                                  Register object,
-                                  Register dst,
-                                  Register heap_number_map,
-                                  Register scratch1,
-                                  Register scratch2,
-                                  FPURegister double_scratch,
-                                  Label* not_int32);
- private:
-  static void LoadNumber(MacroAssembler* masm,
-                         FloatingPointHelper::Destination destination,
-                         Register object,
-                         FPURegister dst,
-                         Register dst1,
-                         Register dst2,
-                         Register heap_number_map,
-                         Register scratch1,
-                         Register scratch2,
-                         Label* not_number);
-};
-
-
 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                    FloatingPointHelper::Destination destination,
                                    Register scratch1,
                                    Register scratch2) {
-  UNIMPLEMENTED_MIPS();
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ sra(scratch1, a0, kSmiTagSize);
+    __ mtc1(scratch1, f14);
+    __ cvt_d_w(f14, f14);
+    __ sra(scratch1, a1, kSmiTagSize);
+    __ mtc1(scratch1, f12);
+    __ cvt_d_w(f12, f12);
+    if (destination == kCoreRegisters) {
+      __ Move(a2, a3, f14);
+      __ Move(a0, a1, f12);
+    }
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Write Smi from a0 to a3 and a2 in double format.
+    __ mov(scratch1, a0);
+    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
+    __ push(ra);
+    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+    // Write Smi from a1 to a1 and a0 in double format.
+    __ mov(scratch1, a1);
+    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
+    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(ra);
+  }
 }
 
 
@@ -183,7 +414,14 @@
     Register scratch1,
     Register scratch2,
     Label* slow) {
-  UNIMPLEMENTED_MIPS();
+  // Load right operand (a0) to f14 or a2/a3.
+  LoadNumber(masm, destination,
+             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
+
+  // Load left operand (a1) to f12 or a0/a1.
+  LoadNumber(masm, destination,
+             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
 }
 
 
@@ -197,30 +435,991 @@
                                      Register scratch1,
                                      Register scratch2,
                                      Label* not_number) {
-  UNIMPLEMENTED_MIPS();
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+
+  Label is_smi, done;
+
+  __ JumpIfSmi(object, &is_smi);
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+  // Handle loading a double from a heap number.
+  if (CpuFeatures::IsSupported(FPU) &&
+      destination == kFPURegisters) {
+    CpuFeatures::Scope scope(FPU);
+    // Load the double from tagged HeapNumber to double register.
+
+    // ARM uses a workaround here because of the unaligned HeapNumber
+    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
+    // point in generating even more instructions.
+    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Load the double from heap number to dst1 and dst2 in double format.
+    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
+    __ lw(dst2, FieldMemOperand(object,
+        HeapNumber::kValueOffset + kPointerSize));
+  }
+  __ Branch(&done);
+
+  // Handle loading a double from a smi.
+  __ bind(&is_smi);
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Convert smi to double using FPU instructions.
+    __ SmiUntag(scratch1, object);
+    __ mtc1(scratch1, dst);
+    __ cvt_d_w(dst, dst);
+    if (destination == kCoreRegisters) {
+      // Load the converted smi to dst1 and dst2 in double format.
+      __ Move(dst1, dst2, dst);
+    }
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Write smi to dst1 and dst2 in double format.
+    __ mov(scratch1, object);
+    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+    __ push(ra);
+    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(ra);
+  }
+
+  __ bind(&done);
 }
 
 
-void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
-                                              Register object,
-                                              Register dst,
-                                              Register heap_number_map,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              FPURegister double_scratch,
-                                              Label* not_int32) {
-  UNIMPLEMENTED_MIPS();
+void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
+                                               Register object,
+                                               Register dst,
+                                               Register heap_number_map,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Register scratch3,
+                                               FPURegister double_scratch,
+                                               Label* not_number) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  Label is_smi;
+  Label done;
+  Label not_in_int32_range;
+
+  __ JumpIfSmi(object, &is_smi);
+  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+  __ ConvertToInt32(object,
+                    dst,
+                    scratch1,
+                    scratch2,
+                    double_scratch,
+                    &not_in_int32_range);
+  __ jmp(&done);
+
+  __ bind(&not_in_int32_range);
+  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+  __ EmitOutOfInt32RangeTruncate(dst,
+                                 scratch1,
+                                 scratch2,
+                                 scratch3);
+
+  __ jmp(&done);
+
+  __ bind(&is_smi);
+  __ SmiUntag(dst, object);
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
+                                             Register int_scratch,
+                                             Destination destination,
+                                             FPURegister double_dst,
+                                             Register dst1,
+                                             Register dst2,
+                                             Register scratch2,
+                                             FPURegister single_scratch) {
+  ASSERT(!int_scratch.is(scratch2));
+  ASSERT(!int_scratch.is(dst1));
+  ASSERT(!int_scratch.is(dst2));
+
+  Label done;
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(int_scratch, single_scratch);
+    __ cvt_d_w(double_dst, single_scratch);
+    if (destination == kCoreRegisters) {
+      __ Move(dst1, dst2, double_dst);
+    }
+  } else {
+    Label fewer_than_20_useful_bits;
+    // Expected output:
+    // |         dst2            |         dst1            |
+    // | s |   exp   |              mantissa               |
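+    // For example, int_scratch == -10 should produce
+    // dst2:dst1 == 0xc0240000:00000000, the IEEE-754 encoding of -10.0.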
+
+    // Check for zero.
+    __ mov(dst2, int_scratch);
+    __ mov(dst1, int_scratch);
+    __ Branch(&done, eq, int_scratch, Operand(zero_reg));
+
+    // Preload the sign of the value.
+    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
+    // Get the absolute value of the object (as an unsigned integer).
+    Label skip_sub;
+    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
+    __ Subu(int_scratch, zero_reg, int_scratch);
+    __ bind(&skip_sub);
+
+    // Get mantissa[51:20].
+
+    // Get the position of the first set bit.
+    __ clz(dst1, int_scratch);
+    __ li(scratch2, 31);
+    __ Subu(dst1, scratch2, dst1);
+
+    // Set the exponent.
+    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+    __ Ins(dst2, scratch2,
+        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+    // Clear the first set bit: it becomes the implicit leading 1 of the
+    // mantissa.
+    __ li(scratch2, Operand(1));
+    __ sllv(scratch2, scratch2, dst1);
+    __ li(at, -1);
+    __ Xor(scratch2, scratch2, at);
+    __ And(int_scratch, int_scratch, scratch2);
+
+    // Get the number of bits to set in the lower part of the mantissa.
+    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
+    // Set the higher 20 bits of the mantissa.
+    __ srlv(at, int_scratch, scratch2);
+    __ or_(dst2, dst2, at);
+    __ li(at, 32);
+    __ subu(scratch2, at, scratch2);
+    __ sllv(dst1, int_scratch, scratch2);
+    __ Branch(&done);
+
+    __ bind(&fewer_than_20_useful_bits);
+    __ li(at, HeapNumber::kMantissaBitsInTopWord);
+    __ subu(scratch2, at, dst1);
+    __ sllv(scratch2, int_scratch, scratch2);
+    __ Or(dst2, dst2, scratch2);
+    // Set dst1 to 0.
+    __ mov(dst1, zero_reg);
+  }
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+                                                  Register object,
+                                                  Destination destination,
+                                                  FPURegister double_dst,
+                                                  Register dst1,
+                                                  Register dst2,
+                                                  Register heap_number_map,
+                                                  Register scratch1,
+                                                  Register scratch2,
+                                                  FPURegister single_scratch,
+                                                  Label* not_int32) {
+  ASSERT(!scratch1.is(object) && !scratch2.is(object));
+  ASSERT(!scratch1.is(scratch2));
+  ASSERT(!heap_number_map.is(object) &&
+         !heap_number_map.is(scratch1) &&
+         !heap_number_map.is(scratch2));
+
+  Label done, obj_is_not_smi;
+
+  __ JumpIfNotSmi(object, &obj_is_not_smi);
+  __ SmiUntag(scratch1, object);
+  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+                     scratch2, single_scratch);
+  __ Branch(&done);
+
+  __ bind(&obj_is_not_smi);
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+  // Load the number.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Load the double value.
+    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+    // On MIPS a lot of things cannot be implemented the same way, so right
+    // now it makes more sense to just do things manually.
+
+    // Save FCSR.
+    __ cfc1(scratch1, FCSR);
+    // Disable FPU exceptions.
+    __ ctc1(zero_reg, FCSR);
+    __ trunc_w_d(single_scratch, double_dst);
+    // Retrieve FCSR.
+    __ cfc1(scratch2, FCSR);
+    // Restore FCSR.
+    __ ctc1(scratch1, FCSR);
+
+    // Check for inexact conversion.
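+    // trunc_w_d sets the FCSR inexact flag when the double is not exactly
+    // representable as a 32-bit integer (and the invalid flag on overflow
+    // or NaN), so a non-zero flag field means the conversion lost bits.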
+    __ srl(scratch2, scratch2, kFCSRFlagShift);
+    __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+
+    // Jump to not_int32 if the operation did not succeed.
+    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+
+    if (destination == kCoreRegisters) {
+      __ Move(dst1, dst2, double_dst);
+    }
+
+  } else {
+    ASSERT(!scratch1.is(object) && !scratch2.is(object));
+    // Load the double value in the destination registers.
+    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+    // Check for 0 and -0.
+    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
+    __ Or(scratch1, scratch1, Operand(dst2));
+    __ Branch(&done, eq, scratch1, Operand(zero_reg));
+
+    // Check that the value can be exactly represented by a 32-bit integer.
+    // Jump to not_int32 if that's not the case.
+    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+
+    // dst1 and dst2 were trashed. Reload the double value.
+    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+  }
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
+                                            Register object,
+                                            Register dst,
+                                            Register heap_number_map,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            FPURegister double_scratch,
+                                            Label* not_int32) {
+  ASSERT(!dst.is(object));
+  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
+  ASSERT(!scratch1.is(scratch2) &&
+         !scratch1.is(scratch3) &&
+         !scratch2.is(scratch3));
+
+  Label done;
+
+  // Untag the object into the destination register.
+  __ SmiUntag(dst, object);
+  // Just return if the object is a smi.
+  __ JumpIfSmi(object, &done);
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+  // Object is a heap number.
+  // Convert the floating point value to a 32-bit integer.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Load the double value.
+    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+    // On MIPS a lot of things cannot be implemented the same way, so right
+    // now it makes more sense to just do things manually.
+
+    // Save FCSR.
+    __ cfc1(scratch1, FCSR);
+    // Disable FPU exceptions.
+    __ ctc1(zero_reg, FCSR);
+    __ trunc_w_d(double_scratch, double_scratch);
+    // Retrieve FCSR.
+    __ cfc1(scratch2, FCSR);
+    // Restore FCSR.
+    __ ctc1(scratch1, FCSR);
+
+    // Check for inexact conversion.
+    __ srl(scratch2, scratch2, kFCSRFlagShift);
+    __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+
+    // Jump to not_int32 if the operation did not succeed.
+    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    // Get the result in the destination register.
+    __ mfc1(dst, double_scratch);
+
+  } else {
+    // Load the double value in the destination registers.
+    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+    // Check for 0 and -0.
+    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
+    __ Or(dst, scratch2, Operand(dst));
+    __ Branch(&done, eq, dst, Operand(zero_reg));
+
+    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+    // Register state after DoubleIs32BitInteger:
+    // dst: mantissa[51:20].
+    // scratch2: 1.
+    // scratch3: 32 - exponent.
+
+    // Shift back the higher bits of the mantissa.
+    __ srlv(dst, dst, scratch3);
+    // Set the implicit first bit.
+    __ li(at, 32);
+    __ subu(scratch3, at, scratch3);
+    __ sllv(scratch2, scratch2, scratch3);
+    __ Or(dst, dst, scratch2);
+    // Set the sign.
+    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+    Label skip_sub;
+    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
+    __ Subu(dst, zero_reg, dst);
+    __ bind(&skip_sub);
+  }
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+                                               Register src1,
+                                               Register src2,
+                                               Register dst,
+                                               Register scratch,
+                                               Label* not_int32) {
+  // Get exponent alone in scratch.
+  __ Ext(scratch,
+         src1,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // Subtract the bias from the exponent.
+  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
+
+  // src1: higher (exponent) part of the double value.
+  // src2: lower (mantissa) part of the double value.
+  // scratch: unbiased exponent.
+
+  // Fast cases. Check for obvious non 32-bit integer values.
+  // Negative exponent cannot yield 32-bit integers.
+  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
+  // Exponent greater than 31 cannot yield 32-bit integers.
+  // Also, a positive value with an exponent equal to 31 is outside of the
+  // signed 32-bit integer range.
+  // Another way to put it is that if (exponent - signbit) > 30 then the
+  // number cannot be represented as an int32.
+  Register tmp = dst;
+  __ srl(at, src1, 31);
+  __ subu(tmp, scratch, at);
+  __ Branch(not_int32, gt, tmp, Operand(30));
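+  // E.g. 2^31 has exponent 31 and sign 0: 31 - 0 = 31 > 30, so it is
+  // rejected, while -2^31 has exponent 31 and sign 1: 31 - 1 = 30, so it
+  // is accepted; it is exactly kMinInt.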
+  // If bits [21:0] of the mantissa are not all null the number cannot be
+  // a 32-bit integer: the exponent is at most 30, so at least the lowest
+  // 22 mantissa bits must be null.
+  __ And(tmp, src2, 0x3fffff);
+  __ Branch(not_int32, ne, tmp, Operand(zero_reg));
+
+  // Otherwise the exponent needs to be big enough to shift all the
+  // non-zero bits out to the left. So we need the (30 - exponent) lowest
+  // bits of the 31 higher bits of the mantissa to be null.
+  // Because bits [21:0] are null, we can instead check that the
+  // (32 - exponent) lowest bits of the 32 higher bits of the mantissa
+  // are null.
+
+  // Get the 32 higher bits of the mantissa in dst.
+  __ Ext(dst,
+         src2,
+         HeapNumber::kMantissaBitsInTopWord,
+         32 - HeapNumber::kMantissaBitsInTopWord);
+  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
+  __ or_(dst, dst, at);
+
+  // Create the mask and test the lower bits (of the higher bits).
+  __ li(at, 32);
+  __ subu(scratch, at, scratch);
+  __ li(src2, 1);
+  __ sllv(src1, src2, scratch);
+  __ Subu(src1, src1, Operand(1));
+  __ And(src1, dst, src1);
+  __ Branch(not_int32, ne, src1, Operand(zero_reg));
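+  // E.g. for 6.0 the 32 higher mantissa bits are 0x80000000 and the
+  // unbiased exponent is 2, so the mask is (1 << 30) - 1 = 0x3fffffff;
+  // the AND yields zero and 6.0 is accepted as a 32-bit integer.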
+}
+
+
+void FloatingPointHelper::CallCCodeForDoubleOperation(
+    MacroAssembler* masm,
+    Token::Value op,
+    Register heap_number_result,
+    Register scratch) {
+  // Using core registers:
+  // a0: Left value (least significant part of mantissa).
+  // a1: Left value (sign, exponent, top of mantissa).
+  // a2: Right value (least significant part of mantissa).
+  // a3: Right value (sign, exponent, top of mantissa).
+
+  // Assert that heap_number_result is saved.
+  // We currently always use s0 to pass it.
+  ASSERT(heap_number_result.is(s0));
+
+  // Push the current return address before the C call.
+  __ push(ra);
+  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
+  if (!IsMipsSoftFloatABI) {
+    CpuFeatures::Scope scope(FPU);
+    // The code generated so far has not used FPU instructions: the
+    // parameters for the runtime function call are prepared in the a0-a3
+    // registers. But the function we are calling is compiled with the
+    // hard-float flag and expects the hard-float ABI (parameters in
+    // f12/f14 registers), so copy the parameters from a0-a3 to the
+    // f12/f14 register pairs.
+    __ Move(f12, a0, a1);
+    __ Move(f14, a2, a3);
+  }
+  // Call C routine that may not cause GC or other trouble.
+  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+                   4);
+  // Store answer in the overwritable heap number.
+  if (!IsMipsSoftFloatABI) {
+    CpuFeatures::Scope scope(FPU);
+    // Double returned in register f0.
+    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+  } else {
+    // Double returned in registers v0 and v1.
+    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
+    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
+  }
+  // Place heap_number_result in v0 and return to the pushed return address.
+  __ mov(v0, heap_number_result);
+  __ pop(ra);
+  __ Ret();
 }
 
 
 // See comment for class, this does NOT work for int32's that are in Smi range.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label max_negative_int;
+  // the_int_ has the answer, which is a signed int32 but not a Smi.
+  // We test for the special value that has a different exponent.
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  // Test sign, and save for later conditionals.
+  __ And(sign_, the_int_, Operand(0x80000000u));
+  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+
+  // Set up the correct exponent in scratch_. All non-Smi int32s have the
+  // same exponent: a non-Smi integer has magnitude at least 2^30, so it
+  // is 1.xxx * 2^30 and the exponent is 30 (biased).
+  uint32_t non_smi_exponent =
+      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
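+  // With the standard bias of 1023 this is 1053 << 20 == 0x41d00000.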
+  __ li(scratch_, Operand(non_smi_exponent));
+  // Set the sign bit in scratch_ if the value was negative.
+  __ or_(scratch_, scratch_, sign_);
+  // Subtract from 0 if the value was negative.
+  __ subu(at, zero_reg, the_int_);
+  __ movn(the_int_, at, sign_);
+  // We should be masking the implicit first digit of the mantissa away here,
+  // but it just ends up combining harmlessly with the last digit of the
+  // exponent that happens to be 1.  The sign bit is 0 so we shift by 10 to
+  // get the most significant 1 to hit the last bit of the 12-bit sign and
+  // exponent field.
+  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+  __ srl(at, the_int_, shift_distance);
+  __ or_(scratch_, scratch_, at);
+  __ sw(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kExponentOffset));
+  __ sll(scratch_, the_int_, 32 - shift_distance);
+  __ sw(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kMantissaOffset));
+  __ Ret();
+
+  __ bind(&max_negative_int);
+  // The max negative int32 is stored as a positive number in the mantissa of
+  // a double because it uses a sign bit instead of using two's complement.
+  // The actual mantissa bits stored are all 0 because the implicit most
+  // significant 1 bit is not stored.
+  non_smi_exponent += 1 << HeapNumber::kExponentShift;
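+  // The biased exponent becomes 1054, so together with the sign bit the
+  // stored word is 0xc1e00000, the upper half of the double -2^31.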
+  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
+  __ sw(scratch_,
+        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+  __ mov(scratch_, zero_reg);
+  __ sw(scratch_,
+        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+  __ Ret();
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc,
+                                          bool never_nan_nan) {
+  Label not_identical;
+  Label heap_number, return_equal;
+  Register exp_mask_reg = t5;
+
+  __ Branch(&not_identical, ne, a0, Operand(a1));
+
+  // The two objects are identical. If we know that one of them isn't NaN then
+  // we now know they test equal.
+  if (cc != eq || !never_nan_nan) {
+    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+    // so we do the second best thing - test it ourselves.
+    // They are both equal and they are not both Smis, so since they are
+    // identical neither of them is a Smi. If it's not a heap number, then
+    // return equal.
+    if (cc == less || cc == greater) {
+      __ GetObjectType(a0, t4, t4);
+      __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
+    } else {
+      __ GetObjectType(a0, t4, t4);
+      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+      // Comparing JS objects with <=, >= is complicated.
+      if (cc != eq) {
+        __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
+        // Normally here we fall through to return_equal, but undefined is
+        // special: (undefined == undefined) == true, but
+        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
+        if (cc == less_equal || cc == greater_equal) {
+          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+          __ Branch(&return_equal, ne, a0, Operand(t2));
+          if (cc == le) {
+            // undefined <= undefined should fail.
+            __ li(v0, Operand(GREATER));
+          } else {
+            // undefined >= undefined should fail.
+            __ li(v0, Operand(LESS));
+          }
+          __ Ret();
+        }
+      }
+    }
+  }
+
+  __ bind(&return_equal);
+  if (cc == less) {
+    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
+  } else if (cc == greater) {
+    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
+  } else {
+    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
+  }
+  __ Ret();
+
+  if (cc != eq || !never_nan_nan) {
+    // For less and greater we don't have to check for NaN since the result of
+    // x < x is false regardless.  For the others here is some code to check
+    // for NaN.
+    if (cc != lt && cc != gt) {
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if it's
+      // not NaN.
+
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
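+      // For example, +Infinity is 0x7ff00000:00000000 (all exponent bits
+      // set, zero mantissa), while every NaN has some mantissa bit set.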
+      // Read top bits of double representation (second word of value).
+      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+      // Test that exponent bits are all set.
+      __ And(t3, t2, Operand(exp_mask_reg));
+      // If not all exponent bits are set (ne condition), it is not a NaN,
+      // so the objects are equal.
+      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+      // Shift out flag and all exponent bits, retaining only mantissa.
+      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+      // Or with all low-bits of mantissa.
+      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+      __ Or(v0, t3, Operand(t2));
+      // For equal we already have the right value in v0:  Return zero (equal)
+      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+      // not (it's a NaN).  For <= and >= we need to load v0 with the failing
+      // value if it's a NaN.
+      if (cc != eq) {
+        // All-zero means Infinity means equal.
+        __ Ret(eq, v0, Operand(zero_reg));
+        if (cc == le) {
+          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
+        } else {
+          __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
+        }
+      }
+      __ Ret();
+    }
+    // No fall through here.
+  }
+
+  __ bind(&not_identical);
+}
+
+
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Register lhs,
+                                    Register rhs,
+                                    Label* both_loaded_as_doubles,
+                                    Label* slow,
+                                    bool strict) {
+  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+         (lhs.is(a1) && rhs.is(a0)));
+
+  Label lhs_is_smi;
+  __ And(t0, lhs, Operand(kSmiTagMask));
+  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+  // Rhs is a Smi.
+  // Check whether the non-smi is a heap number.
+  __ GetObjectType(lhs, t4, t4);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed. Return non-equal (lhs is already not zero).
+    __ mov(v0, lhs);
+    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number. Call
+    // the runtime.
+    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+  }
+
+  // Rhs is a smi, lhs is a number.
+  // Convert smi rhs to double.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ sra(at, rhs, kSmiTagSize);
+    __ mtc1(at, f14);
+    __ cvt_d_w(f14, f14);
+    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+  } else {
+    // Load lhs to a double in a2, a3.
+    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
+    __ mov(t6, rhs);
+    ConvertToDoubleStub stub1(a1, a0, t6, t5);
+    __ push(ra);
+    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+
+    __ pop(ra);
+  }
+
+  // We now have both loaded as doubles.
+  __ jmp(both_loaded_as_doubles);
+
+  __ bind(&lhs_is_smi);
+  // Lhs is a Smi.  Check whether the non-smi is a heap number.
+  __ GetObjectType(rhs, t4, t4);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed. Return non-equal.
+    __ li(v0, Operand(1));
+    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number. Call
+    // the runtime.
+    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+  }
+
+  // Lhs is a smi, rhs is a number.
+  // Convert smi lhs to double.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ sra(at, lhs, kSmiTagSize);
+    __ mtc1(at, f12);
+    __ cvt_d_w(f12, f12);
+    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  } else {
+    // Convert lhs to a double format. t5 is scratch.
+    __ mov(t6, lhs);
+    ConvertToDoubleStub stub2(a3, a2, t6, t5);
+    __ push(ra);
+    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(ra);
+    // Load rhs to a double in a1, a0.
+    if (rhs.is(a0)) {
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+    } else {
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+    }
+  }
+  // Fall through to both_loaded_as_doubles.
 }
 
 
 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
-  UNIMPLEMENTED_MIPS();
+  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Lhs and rhs are already loaded to f12 and f14 register pairs.
+    __ Move(t0, t1, f14);
+    __ Move(t2, t3, f12);
+  } else {
+    // Lhs and rhs are already loaded to GP registers.
+    __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
+    __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
+    __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
+    __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
+  }
+  Register rhs_exponent = exp_first ? t0 : t1;
+  Register lhs_exponent = exp_first ? t2 : t3;
+  Register rhs_mantissa = exp_first ? t1 : t0;
+  Register lhs_mantissa = exp_first ? t3 : t2;
+  Label one_is_nan, neither_is_nan;
+  Label lhs_not_nan_exp_mask_is_loaded;
+
+  Register exp_mask_reg = t4;
+  __ li(exp_mask_reg, HeapNumber::kExponentMask);
+  __ and_(t5, lhs_exponent, exp_mask_reg);
+  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
+
+  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
+
+  __ li(exp_mask_reg, HeapNumber::kExponentMask);
+  __ bind(&lhs_not_nan_exp_mask_is_loaded);
+  __ and_(t5, rhs_exponent, exp_mask_reg);
+
+  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
+
+  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
+
+  __ bind(&one_is_nan);
+  // NaN comparisons always fail.
+  // Load whatever we need in v0 to make the comparison fail.
+  if (cc == lt || cc == le) {
+    __ li(v0, Operand(GREATER));
+  } else {
+    __ li(v0, Operand(LESS));
+  }
+  __ Ret();  // Return.
+
+  __ bind(&neither_is_nan);
+}
+
+
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+  // f12 and f14 have the two doubles.  Neither is a NaN.
+  // Call a native function to do a comparison between two non-NaNs.
+  // Call C routine that may not cause GC or other trouble.
+  // We make the call and return manually because we need the argument
+  // slots to be freed.
+
+  Label return_result_not_equal, return_result_equal;
+  if (cc == eq) {
+    // Doubles are not equal unless they have the same bit pattern.
+    // Exception: 0 and -0.
+    bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      // Lhs and rhs are already loaded to f12 and f14 register pairs.
+      __ Move(t0, t1, f14);
+      __ Move(t2, t3, f12);
+    } else {
+      // Lhs and rhs are already loaded to GP registers.
+      __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
+      __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
+      __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
+      __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
+    }
+    Register rhs_exponent = exp_first ? t0 : t1;
+    Register lhs_exponent = exp_first ? t2 : t3;
+    Register rhs_mantissa = exp_first ? t1 : t0;
+    Register lhs_mantissa = exp_first ? t3 : t2;
+
+    __ xor_(v0, rhs_mantissa, lhs_mantissa);
+    __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
+
+    __ subu(v0, rhs_exponent, lhs_exponent);
+    __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
+    // 0, -0 case.
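+    // The mantissas are equal and the exponent words differ, so the
+    // values can only be equal if they are +0 and -0: shift out the sign
+    // bits and OR everything together; a zero result means exactly that.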
+    __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
+    __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
+    __ or_(t4, rhs_exponent, lhs_exponent);
+    __ or_(t4, t4, rhs_mantissa);
+
+    __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
+
+    __ bind(&return_result_equal);
+    __ li(v0, Operand(EQUAL));
+    __ Ret();
+  }
+
+  __ bind(&return_result_not_equal);
+
+  if (!CpuFeatures::IsSupported(FPU)) {
+    __ push(ra);
+    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
+    if (!IsMipsSoftFloatABI) {
+      // The code generated so far has not used FPU instructions: the
+      // parameters for the runtime function call are prepared in the
+      // a0-a3 registers. But the function we are calling is compiled with
+      // the hard-float flag and expects the hard-float ABI (parameters in
+      // f12/f14 registers), so copy the parameters from a0-a3 to the
+      // f12/f14 register pairs.
+      __ Move(f12, a0, a1);
+      __ Move(f14, a2, a3);
+    }
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+    __ pop(ra);  // Because this function returns int, result is in v0.
+    __ Ret();
+  } else {
+    CpuFeatures::Scope scope(FPU);
+    Label equal, less_than;
+    __ c(EQ, D, f12, f14);
+    __ bc1t(&equal);
+    __ nop();
+
+    __ c(OLT, D, f12, f14);
+    __ bc1t(&less_than);
+    __ nop();
+
+    // Not equal, not less, not NaN, must be greater.
+    __ li(v0, Operand(GREATER));
+    __ Ret();
+
+    __ bind(&equal);
+    __ li(v0, Operand(EQUAL));
+    __ Ret();
+
+    __ bind(&less_than);
+    __ li(v0, Operand(LESS));
+    __ Ret();
+  }
+}
+
+
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+                                           Register lhs,
+                                           Register rhs) {
+  // If either operand is a JSObject or an oddball value, then they are
+  // not equal since their pointers are different.
+  // There is no test for undetectability in strict equality.
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  Label first_non_object;
+  // Get the type of the first operand into a2 and compare it with
+  // FIRST_JS_OBJECT_TYPE.
+  __ GetObjectType(lhs, a2, a2);
+  __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Return non-zero.
+  Label return_not_equal;
+  __ bind(&return_not_equal);
+  __ li(v0, Operand(1));
+  __ Ret();
+
+  __ bind(&first_non_object);
+  // Check for oddballs: true, false, null, undefined.
+  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
+
+  __ GetObjectType(rhs, a3, a3);
+  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Check for oddballs: true, false, null, undefined.
+  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
+
+  // Now that we have the types we might as well check for symbol-symbol.
+  // Ensure that no non-strings have the symbol bit set.
+  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(t2, a2, Operand(a3));
+  __ And(t0, t2, Operand(kIsSymbolMask));
+  __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
+}
+
+
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+                                       Register lhs,
+                                       Register rhs,
+                                       Label* both_loaded_as_doubles,
+                                       Label* not_heap_numbers,
+                                       Label* slow) {
+  __ GetObjectType(lhs, a3, a2);
+  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  // If first was a heap number & second wasn't, go to slow case.
+  __ Branch(slow, ne, a3, Operand(a2));
+
+  // Both are heap numbers. Load them up then jump to the code we have
+  // for that.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  } else {
+    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+    if (rhs.is(a0)) {
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+    } else {
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+    }
+  }
+  __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+                                         Register lhs,
+                                         Register rhs,
+                                         Label* possible_strings,
+                                         Label* not_both_strings) {
+  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+         (lhs.is(a1) && rhs.is(a0)));
+
+  // a2 is object type of lhs.
+  // Ensure that no non-strings have the symbol bit set.
+  Label object_test;
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(at, a2, Operand(kIsNotStringMask));
+  __ Branch(&object_test, ne, at, Operand(zero_reg));
+  __ And(at, a2, Operand(kIsSymbolMask));
+  __ Branch(possible_strings, eq, at, Operand(zero_reg));
+  __ GetObjectType(rhs, a3, a3);
+  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+  __ And(at, a3, Operand(kIsSymbolMask));
+  __ Branch(possible_strings, eq, at, Operand(zero_reg));
+
+  // Both are symbols. We already checked they weren't the same pointer
+  // so they are not equal.
+  __ li(v0, Operand(1));   // Non-zero indicates not equal.
+  __ Ret();
+
+  __ bind(&object_test);
+  __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_OBJECT_TYPE));
+  __ GetObjectType(rhs, a2, a3);
+  __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // If both objects are undetectable, they are equal.  Otherwise, they
+  // are not equal, since they are different objects and an object is not
+  // equal to undefined.
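+  // a0 has the undetectable bit set iff both maps have it, so XORing with
+  // the bit yields 0 (equal) exactly when both objects are undetectable.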
+  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
+  __ and_(a0, a2, a3);
+  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+  __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
+  __ Ret();
 }
 
 
@@ -232,12 +1431,109 @@
                                                          Register scratch3,
                                                          bool object_is_smi,
                                                          Label* not_found) {
-  UNIMPLEMENTED_MIPS();
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch3;
+
+  // Load the number string cache.
+  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+  // Divide length by two (length is a smi).
+  __ sra(mask, mask, kSmiTagSize + 1);
+  __ Addu(mask, mask, -1);  // Make mask.
+
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
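+  // For example, if the cache holds 64 number/string pairs the mask is 63,
+  // so the smi 42 probes entry 42 & 63 == 42, and a heap number probes
+  // entry (upper_word ^ lower_word) & 63.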
+  Isolate* isolate = masm->isolate();
+  Label is_smi;
+  Label load_result_from_cache;
+  if (!object_is_smi) {
+    __ JumpIfSmi(object, &is_smi);
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      __ CheckMap(object,
+                  scratch1,
+                  Heap::kHeapNumberMapRootIndex,
+                  not_found,
+                  DONT_DO_SMI_CHECK);
+
+      STATIC_ASSERT(8 == kDoubleSize);
+      __ Addu(scratch1,
+              object,
+              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+      __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+      __ lw(scratch1, MemOperand(scratch1, 0));
+      __ Xor(scratch1, scratch1, Operand(scratch2));
+      __ And(scratch1, scratch1, Operand(mask));
+
+      // Calculate address of entry in string cache: each entry consists
+      // of two pointer sized fields.
+      __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+      __ Addu(scratch1, number_string_cache, scratch1);
+
+      Register probe = mask;
+      __ lw(probe,
+             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+      __ JumpIfSmi(probe, not_found);
+      __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+      __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+      __ c(EQ, D, f12, f14);
+      __ bc1t(&load_result_from_cache);
+      __ nop();   // bc1t() requires explicit fill of branch delay slot.
+      __ Branch(not_found);
+    } else {
+      // Note that there is no cache check for the non-FPU case, even
+      // though it seems there could be. This may be a tiny missed
+      // optimization for non-FPU cores.
+      __ Branch(not_found);
+    }
+  }
+
+  __ bind(&is_smi);
+  Register scratch = scratch1;
+  __ sra(scratch, object, 1);   // Shift away the tag.
+  __ And(scratch, mask, Operand(scratch));
+
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
+  __ Addu(scratch, number_string_cache, scratch);
+
+  // Check if the entry is the smi we are looking for.
+  Register probe = mask;
+  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  __ Branch(not_found, ne, object, Operand(probe));
+
+  // Get the result from the cache.
+  __ bind(&load_result_from_cache);
+  __ lw(result,
+         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+  __ IncrementCounter(isolate->counters()->number_to_string_native(),
+                      1,
+                      scratch1,
+                      scratch2);
 }
 
 
 void NumberToStringStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label runtime;
+
+  __ lw(a1, MemOperand(sp, 0));
+
+  // Generate code to lookup number in the number string cache.
+  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
+  __ Addu(sp, sp, Operand(1 * kPointerSize));
+  __ Ret();
+
+  __ bind(&runtime);
+  // Handle number to string in the runtime system if not found in the cache.
+  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
 }
 
 
@@ -245,105 +1541,1018 @@
 // On exit, v0 is 0, positive, or negative (smi) to indicate the result
 // of the comparison.
 void CompareStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label slow;  // Call builtin.
+  Label not_smis, both_loaded_as_doubles;
+
+  if (include_smi_compare_) {
+    Label not_two_smis, smi_done;
+    __ Or(a2, a1, a0);
+    __ JumpIfNotSmi(a2, &not_two_smis);
+    __ sra(a1, a1, 1);
+    __ sra(a0, a0, 1);
+    __ Subu(v0, a1, a0);
+    __ Ret();
+    __ bind(&not_two_smis);
+  } else if (FLAG_debug_code) {
+    __ Or(a2, a1, a0);
+    __ And(a2, a2, kSmiTagMask);
+    __ Assert(ne, "CompareStub: unexpected smi operands.",
+        a2, Operand(zero_reg));
+  }
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  // Handle the case where the objects are identical.  Either returns the answer
+  // or goes to slow.  Only falls through if the objects were not identical.
+  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+
+  // If either is a Smi (we know that not both are), then they can only
+  // be strictly equal if the other is a HeapNumber.
+  STATIC_ASSERT(kSmiTag == 0);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  __ And(t2, lhs_, Operand(rhs_));
+  __ JumpIfNotSmi(t2, &not_smis, t0);
+  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+  // 1) Return the answer.
+  // 2) Go to slow.
+  // 3) Fall through or jump to both_loaded_as_doubles.
+  // In case 3 we have found out we were dealing with a number-number
+  // comparison and the numbers have been loaded into f12 and f14 as doubles,
+  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
+  EmitSmiNonsmiComparison(masm, lhs_, rhs_,
+                          &both_loaded_as_doubles, &slow, strict_);
+
+  __ bind(&both_loaded_as_doubles);
+  // f12, f14 are the double representations of the left hand side
+  // and the right hand side if we have FPU. Otherwise a2, a3 represent
+  // left hand side and a0, a1 represent right hand side.
+
+  Isolate* isolate = masm->isolate();
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    Label nan;
+    __ li(t0, Operand(LESS));
+    __ li(t1, Operand(GREATER));
+    __ li(t2, Operand(EQUAL));
+
+    // Check if either rhs or lhs is NaN.
+    __ c(UN, D, f12, f14);
+    __ bc1t(&nan);
+    __ nop();
+
+    // Check if LESS condition is satisfied. If true, move conditionally
+    // result to v0.
+    __ c(OLT, D, f12, f14);
+    __ movt(v0, t0);
+    // Use the previous check to conditionally move the opposite condition
+    // (GREATER) into v0. If rhs equals lhs, this will be corrected by the
+    // next check.
+    __ movf(v0, t1);
+    // Check if EQUAL condition is satisfied. If true, move conditionally
+    // result to v0.
+    __ c(EQ, D, f12, f14);
+    __ movt(v0, t2);
+
+    __ Ret();
+
+    __ bind(&nan);
+    // NaN comparisons always fail.
+    // Load whatever we need in v0 to make the comparison fail.
+    if (cc_ == lt || cc_ == le) {
+      __ li(v0, Operand(GREATER));
+    } else {
+      __ li(v0, Operand(LESS));
+    }
+    __ Ret();
+  } else {
+    // Checks for NaN in the doubles we have loaded. Can return the answer
+    // or fall through if neither is a NaN.
+    EmitNanCheck(masm, cc_);
+
+    // Compares two doubles that are not NaNs. Returns the answer.
+    // Never falls through.
+    EmitTwoNonNanDoubleComparison(masm, cc_);
+  }
+
+  __ bind(&not_smis);
+  // At this point we know we are dealing with two different objects,
+  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
+  if (strict_) {
+    // This returns non-equal for some object types, or falls through if it
+    // was not lucky.
+    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+  }
+
+  Label check_for_symbols;
+  Label flat_string_check;
+  // Check for heap-number-heap-number comparison. Can jump to slow case,
+  // or load both doubles and jump to the code that handles
+  // that case. If the inputs are not doubles then jumps to check_for_symbols.
+  // In this case a2 will contain the type of lhs_.
+  EmitCheckForTwoHeapNumbers(masm,
+                             lhs_,
+                             rhs_,
+                             &both_loaded_as_doubles,
+                             &check_for_symbols,
+                             &flat_string_check);
+
+  __ bind(&check_for_symbols);
+  if (cc_ == eq && !strict_) {
+    // Returns an answer for two symbols or two detectable objects.
+    // Otherwise jumps to string case or not both strings case.
+    // Assumes that a2 is the type of lhs_ on entry.
+    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+  }
+
+  // Check for both being sequential ASCII strings, and inline if that is the
+  // case.
+  __ bind(&flat_string_check);
+
+  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
+
+  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
+  if (cc_ == eq) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
+                                                     lhs_,
+                                                     rhs_,
+                                                     a2,
+                                                     a3,
+                                                     t0);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                       lhs_,
+                                                       rhs_,
+                                                       a2,
+                                                       a3,
+                                                       t0,
+                                                       t1);
+  }
+  // Never falls through to here.
+
+  __ bind(&slow);
+  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
+  // a1 (rhs) second.
+  __ Push(lhs_, rhs_);
+  // Figure out which native to call and set up the arguments.
+  Builtins::JavaScript native;
+  if (cc_ == eq) {
+    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    native = Builtins::COMPARE;
+    int ncr;  // NaN compare result.
+    if (cc_ == lt || cc_ == le) {
+      ncr = GREATER;
+    } else {
+      ASSERT(cc_ == gt || cc_ == ge);  // Remaining cases.
+      ncr = LESS;
+    }
+    __ li(a0, Operand(Smi::FromInt(ncr)));
+    __ push(a0);
+  }
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(native, JUMP_FUNCTION);
 }
 
 
 // This stub does not handle the inlined cases (Smis, Booleans, undefined).
 // The stub returns zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // This stub uses FPU instructions.
+  CpuFeatures::Scope scope(FPU);
+
+  Label false_result;
+  Label not_heap_number;
+  Register scratch0 = t5.is(tos_) ? t3 : t5;
+
+  // undefined -> false
+  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+  // Boolean -> its value
+  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the equal condition is satisfied.
+  __ Ret(eq, tos_, Operand(scratch0));
+
+  // Smis: 0 -> false, all other -> true
+  __ And(scratch0, tos_, tos_);
+  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
+  __ And(scratch0, tos_, Operand(kSmiTagMask));
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the not equal condition is satisfied.
+  __ Ret(eq, scratch0, Operand(zero_reg));
+
+  // 'null' -> false
+  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+  // HeapNumber => false if +0, -0, or NaN.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+
+  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+  __ fcmp(f12, 0.0, UEQ);
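+  // UEQ ("unordered or equal") sets the FP condition bit for +0, -0 and NaN,
+  // so the movt below clears tos_ exactly in those cases.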
+
+  // "tos_" is a register, and contains a non zero value by default.
+  // Hence we only need to overwrite "tos_" with zero to return false for
+  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+  __ movt(tos_, zero_reg);
+  __ Ret();
+
+  __ bind(&not_heap_number);
+
+  // It can be an undetectable object.
+  // Undetectable => false.
+  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
+  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+
+  // JavaScript object => true.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+
+  // "tos_" is a register and contains a non-zero value.
+  // Hence we implicitly return true if the greater than
+  // condition is satisfied.
+  __ Ret(gt, scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Check for string.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+  // "tos_" is a register and contains a non-zero value.
+  // Hence we implicitly return true if the greater than
+  // condition is satisfied.
+  __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE));
+
+  // String value => false iff empty, i.e., length is zero.
+  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+  // If length is zero, "tos_" contains zero ==> false.
+  // If length is not zero, "tos_" contains a non-zero value ==> true.
+  __ Ret();
+
+  // Return 0 in "tos_" for false.
+  __ bind(&false_result);
+  __ mov(tos_, zero_reg);
+  __ Ret();
 }
 
 
-// We fall into this code if the operands were Smis, but the result was
-// not (eg. overflow).  We branch into this code (to the not_smi label) if
-// the operands were not both Smi.  The operands are in lhs and rhs.
-// To call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in a0 and a1 (for the
-// value in a1) and a2 and a3 (for the value in a0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
-                                    Label* not_smi,
-                                    Register lhs,
-                                    Register rhs,
-                                    const Builtins::JavaScript& builtin) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// For bitwise ops where the inputs are not both Smis we here try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value.  We truncate towards zero as required
-// by the ES spec.  If this is the case we do the bitwise op and see if the
-// result is a Smi.  If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs (x) and rhs (y). (Result = x op y).
-// On exit the result is in v0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                                                Register lhs,
-                                                Register rhs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
+Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
+  UnaryOpStub stub(key, type_info);
   return stub.GetCode();
 }
 
 
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-    TRBinaryOpIC::TypeInfo type_info,
-    TRBinaryOpIC::TypeInfo result_type_info) {
-  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+const char* UnaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name = NULL;  // Make g++ happy.
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "UnaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               UnaryOpIC::GetName(operand_type_));
+  return name_;
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case UnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case UnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case UnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case UnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  // Argument is in a0 and v0 at this point, so we can overwrite a0.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ li(a2, Operand(Smi::FromInt(MinorKey())));
+  __ li(a1, Operand(Smi::FromInt(op_)));
+  __ li(a0, Operand(Smi::FromInt(operand_type_)));
+
+  __ Push(v0, a2, a1, a0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+                        masm->isolate()),
+      4,
+      1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                     Label* non_smi,
+                                     Label* slow) {
+  __ JumpIfNotSmi(a0, non_smi);
+
+  // The result of negating zero or the smallest negative smi is not a smi.
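+  // Masking off the sign bit leaves zero only for tagged 0 and 0x80000000
+  // (the smi -2^30, whose negation 2^30 is out of smi range).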
+  __ And(t0, a0, ~0x80000000);
+  __ Branch(slow, eq, t0, Operand(zero_reg));
+
+  // Return '0 - value'.
+  __ Subu(v0, zero_reg, a0);
+  __ Ret();
+}
+
+
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+                                        Label* non_smi) {
+  __ JumpIfNotSmi(a0, non_smi);
+
+  // Flip bits and revert inverted smi-tag.
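+  // For a tagged smi 2x, ~(2x) = -2x - 1; clearing the (now set) tag bit
+  // yields -2x - 2 = 2 * (~x), i.e. the smi-tagged bitwise NOT.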
+  __ Neg(v0, a0);
+  __ And(v0, v0, ~kSmiTagMask);
+  __ Ret();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, slow, call_builtin;
+  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+  __ bind(&call_builtin);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                            Label* slow) {
+  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+  // a0 is a heap number.  Get a new heap number in a1.
+  if (mode_ == UNARY_OVERWRITE) {
+    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+  } else {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(a0);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(a1, v0);
+    __ pop(a0);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+    __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+    __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
+    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
+    __ mov(v0, a1);
+  }
+  __ Ret();
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(
+    MacroAssembler* masm,
+    Label* slow) {
+  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+  // Convert the heap number in a0 to an untagged integer in a1.
+  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
+
+  // Do the bitwise operation and check if the result fits in a smi.
+  Label try_float;
+  __ Neg(a1, a1);
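+  // Adding 0x40000000 makes the result negative exactly when the value lies
+  // outside the smi range [-2^30, 2^30 - 1].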
+  __ Addu(a2, a1, Operand(0x40000000));
+  __ Branch(&try_float, lt, a2, Operand(zero_reg));
+
+  // Tag the result as a smi and we're done.
+  __ SmiTag(v0, a1);
+  __ Ret();
+
+  // Try to store the result in a heap number.
+  __ bind(&try_float);
+  if (mode_ == UNARY_NO_OVERWRITE) {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(v0, a2, a3, t2, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(a1);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ pop(a1);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+  }
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(a1, f0);
+    __ cvt_d_w(f0, f0);
+    __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+    __ Ret();
+  } else {
+    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+    // have to set up a frame.
+    WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
+    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+  }
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(
+    MacroAssembler* masm) {
+  // Handle the slow case by jumping to the JavaScript builtin.
+  __ push(a0);
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+Handle<Code> GetBinaryOpStub(int key,
+                             BinaryOpIC::TypeInfo type_info,
+                             BinaryOpIC::TypeInfo result_type_info) {
+  BinaryOpStub stub(key, type_info, result_type_info);
   return stub.GetCode();
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  __ Push(a1, a0);
+
+  __ li(a2, Operand(Smi::FromInt(MinorKey())));
+  __ li(a1, Operand(Smi::FromInt(op_)));
+  __ li(a0, Operand(Smi::FromInt(operands_type_)));
+  __ Push(a2, a1, a0);
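+  // Five values are now on the stack: left, right, key, op and operand
+  // type, matching the five arguments of the tail call below.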
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+                        masm->isolate()),
+      5,
+      1);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
     MacroAssembler* masm) {
   UNIMPLEMENTED();
 }
 
 
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operands_type_) {
+    case BinaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case BinaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case BinaryOpIC::INT32:
+      GenerateInt32Stub(masm);
+      break;
+    case BinaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case BinaryOpIC::ODDBALL:
+      GenerateOddballStub(masm);
+      break;
+    case BinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
+    case BinaryOpIC::STRING:
+      GenerateStringStub(masm);
+      break;
+    case BinaryOpIC::GENERIC:
+      GenerateGeneric(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
 }
 
 
-const char* TypeRecordingBinaryOpStub::GetName() {
-  UNIMPLEMENTED_MIPS();
+const char* BinaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "BinaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               BinaryOpIC::GetName(operands_type_));
   return name_;
 }
 
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
-    MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+  Register left = a1;
+  Register right = a0;
+
+  Register scratch1 = t0;
+  Register scratch2 = t1;
+
+  ASSERT(right.is(a0));
+  STATIC_ASSERT(kSmiTag == 0);
+
+  Label not_smi_result;
+  switch (op_) {
+    case Token::ADD:
+      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+      __ RetOnNoOverflow(scratch1);
+      // No need to revert anything - right and left are intact.
+      break;
+    case Token::SUB:
+      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+      __ RetOnNoOverflow(scratch1);
+      // No need to revert anything - right and left are intact.
+      break;
+    case Token::MUL: {
+      // Remove tag from one of the operands. This way the multiplication result
+      // will be a smi if it fits the smi range.
+      __ SmiUntag(scratch1, right);
+      // Do multiplication.
+      // lo = lower 32 bits of scratch1 * left.
+      // hi = higher 32 bits of scratch1 * left.
+      __ Mult(left, scratch1);
+      // Check for overflowing the smi range - no overflow if higher 33 bits of
+      // the result are identical.
+      __ mflo(scratch1);
+      __ mfhi(scratch2);
+      __ sra(scratch1, scratch1, 31);
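+      // The arithmetic shift replicates the low word's sign bit; the product
+      // fits in 32 bits only if this equals the high word.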
+      __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
+      // Go slow on zero result to handle -0.
+      __ mflo(v0);
+      __ Ret(ne, v0, Operand(zero_reg));
+      // We need -0 if we were multiplying a negative number by 0 to get 0.
+      // We know one of them was zero.
+      __ Addu(scratch2, right, left);
+      Label skip;
+      // ARM uses the 'pl' condition, which is 'ge'.
+      // Negating it results in 'lt'.
+      __ Branch(&skip, lt, scratch2, Operand(zero_reg));
+      ASSERT(Smi::FromInt(0) == 0);
+      __ mov(v0, zero_reg);
+      __ Ret();  // Return smi 0 if the non-zero one was positive.
+      __ bind(&skip);
+      // We fall through here if we multiplied a negative number by 0,
+      // because that means we should produce -0.
+      }
+      break;
+    case Token::DIV: {
+      Label done;
+      __ SmiUntag(scratch2, right);
+      __ SmiUntag(scratch1, left);
+      __ Div(scratch1, scratch2);
+      // A minor optimization: div may be calculated asynchronously, so we check
+      // for division by zero before getting the result.
+      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+      // If the result is 0, we need to make sure the divisor (right) is
+      // positive, otherwise it is a -0 case.
+      // Quotient is in 'lo', remainder is in 'hi'.
+      // Check for no remainder first.
+      __ mfhi(scratch1);
+      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+      __ mflo(scratch1);
+      __ Branch(&done, ne, scratch1, Operand(zero_reg));
+      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+      __ bind(&done);
+      // Check that the signed result fits in a Smi.
+      __ Addu(scratch2, scratch1, Operand(0x40000000));
+      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      __ Ret();
+      }
+      break;
+    case Token::MOD: {
+      Label done;
+      __ SmiUntag(scratch2, right);
+      __ SmiUntag(scratch1, left);
+      __ Div(scratch1, scratch2);
+      // A minor optimization: div may be calculated asynchronously, so we check
+      // for division by 0 before calling mfhi.
+      // Check for zero on the right hand side.
+      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+      // If the result is 0, we need to make sure the dividend (left) is
+      // positive (or 0), otherwise it is a -0 case.
+      // Remainder is in 'hi'.
+      __ mfhi(scratch2);
+      __ Branch(&done, ne, scratch2, Operand(zero_reg));
+      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+      __ bind(&done);
+      // Check that the signed result fits in a Smi.
+      __ Addu(scratch1, scratch2, Operand(0x40000000));
+      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+      __ SmiTag(v0, scratch2);
+      __ Ret();
+      }
+      break;
+    case Token::BIT_OR:
+      __ Or(v0, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_AND:
+      __ And(v0, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_XOR:
+      __ Xor(v0, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::SAR:
+      // Remove tags from right operand.
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ srav(scratch1, left, scratch1);
+      // Smi tag result.
+      __ And(v0, scratch1, Operand(~kSmiTagMask));
+      __ Ret();
+      break;
+    case Token::SHR:
+      // Remove tags from operands. We can't do this on a 31 bit number
+      // because then the 0s get shifted into bit 30 instead of bit 31.
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ srlv(v0, scratch1, scratch2);
+      // Unsigned shift is not allowed to produce a negative number, so
+      // check the sign bit and the sign bit after Smi tagging.
+      __ And(scratch1, v0, Operand(0xc0000000));
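+      // 0xc0000000 tests the top two bits: if either is set, the value
+      // cannot be tagged as a non-negative smi.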
+      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+      // Smi tag result.
+      __ SmiTag(v0);
+      __ Ret();
+      break;
+    case Token::SHL:
+      // Remove tags from operands.
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ sllv(scratch1, scratch1, scratch2);
+      // Check that the signed result fits in a Smi.
+      __ Addu(scratch2, scratch1, Operand(0x40000000));
+      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      __ Ret();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  __ bind(&not_smi_result);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
-                                                    bool smi_operands,
-                                                    Label* not_numbers,
-                                                    Label* gc_required) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+                                       bool smi_operands,
+                                       Label* not_numbers,
+                                       Label* gc_required) {
+  Register left = a1;
+  Register right = a0;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+  Register scratch3 = t0;
+
+  ASSERT(smi_operands || (not_numbers != NULL));
+  if (smi_operands && FLAG_debug_code) {
+    __ AbortIfNotSmi(left);
+    __ AbortIfNotSmi(right);
+  }
+
+  Register heap_number_map = t2;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
+      // depending on whether FPU is available or not.
+      FloatingPointHelper::Destination destination =
+          CpuFeatures::IsSupported(FPU) &&
+          op_ != Token::MOD ?
+              FloatingPointHelper::kFPURegisters :
+              FloatingPointHelper::kCoreRegisters;
+
+      // Allocate new heap number for result.
+      Register result = s0;
+      GenerateHeapResultAllocation(
+          masm, result, heap_number_map, scratch1, scratch2, gc_required);
+
+      // Load the operands.
+      if (smi_operands) {
+        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+      } else {
+        FloatingPointHelper::LoadOperands(masm,
+                                          destination,
+                                          heap_number_map,
+                                          scratch1,
+                                          scratch2,
+                                          not_numbers);
+      }
+
+      // Calculate the result.
+      if (destination == FloatingPointHelper::kFPURegisters) {
+        // Using FPU registers:
+        // f12: Left value.
+        // f14: Right value.
+        CpuFeatures::Scope scope(FPU);
+        switch (op_) {
+        case Token::ADD:
+          __ add_d(f10, f12, f14);
+          break;
+        case Token::SUB:
+          __ sub_d(f10, f12, f14);
+          break;
+        case Token::MUL:
+          __ mul_d(f10, f12, f14);
+          break;
+        case Token::DIV:
+          __ div_d(f10, f12, f14);
+          break;
+        default:
+          UNREACHABLE();
+        }
+
+        // ARM uses a workaround here because of the unaligned HeapNumber
+        // kValueOffset. On MIPS this workaround is built into sdc1 so
+        // there's no point in generating even more instructions.
+        __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+        __ mov(v0, result);
+        __ Ret();
+      } else {
+        // Call the C function to handle the double operation.
+        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+                                                         op_,
+                                                         result,
+                                                         scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
+      }
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      if (smi_operands) {
+        __ SmiUntag(a3, left);
+        __ SmiUntag(a2, right);
+      } else {
+        // Convert operands to 32-bit integers. Right in a2 and left in a3.
+        FloatingPointHelper::ConvertNumberToInt32(masm,
+                                                  left,
+                                                  a3,
+                                                  heap_number_map,
+                                                  scratch1,
+                                                  scratch2,
+                                                  scratch3,
+                                                  f0,
+                                                  not_numbers);
+        FloatingPointHelper::ConvertNumberToInt32(masm,
+                                                  right,
+                                                  a2,
+                                                  heap_number_map,
+                                                  scratch1,
+                                                  scratch2,
+                                                  scratch3,
+                                                  f0,
+                                                  not_numbers);
+      }
+      Label result_not_a_smi;
+      switch (op_) {
+        case Token::BIT_OR:
+          __ Or(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_XOR:
+          __ Xor(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_AND:
+          __ And(a2, a3, Operand(a2));
+          break;
+        case Token::SAR:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(a2, a2, 5);
+          __ srav(a2, a3, a2);
+          break;
+        case Token::SHR:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(a2, a2, 5);
+          __ srlv(a2, a3, a2);
+          // SHR is special because it is required to produce a positive answer.
+          // The code below for writing into heap numbers isn't capable of
+          // writing the register as an unsigned int, so we fall back to the
+          // slow case when that happens.
+          if (CpuFeatures::IsSupported(FPU)) {
+            __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
+          } else {
+            __ Branch(not_numbers, lt, a2, Operand(zero_reg));
+          }
+          break;
+        case Token::SHL:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(a2, a2, 5);
+          __ sllv(a2, a3, a2);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      // Check that the *signed* result fits in a smi.
+      __ Addu(a3, a2, Operand(0x40000000));
+      __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+      __ SmiTag(v0, a2);
+      __ Ret();
+
+      // Allocate new heap number for result.
+      __ bind(&result_not_a_smi);
+      Register result = t1;
+      if (smi_operands) {
+        __ AllocateHeapNumber(
+            result, scratch1, scratch2, heap_number_map, gc_required);
+      } else {
+        GenerateHeapResultAllocation(
+            masm, result, heap_number_map, scratch1, scratch2, gc_required);
+      }
+
+      // a2: Answer as signed int32.
+      // t1: Heap number to write answer into.
+
+      // Nothing can go wrong now, so move the heap number to v0, which is the
+      // result.
+      __ mov(v0, t1);
+
+      if (CpuFeatures::IsSupported(FPU)) {
+        // Convert the int32 in a2 to the heap number in v0. As
+        // mentioned above, SHR needs to always produce a positive result.
+        CpuFeatures::Scope scope(FPU);
+        __ mtc1(a2, f0);
+        if (op_ == Token::SHR) {
+          __ Cvt_d_uw(f0, f0);
+        } else {
+          __ cvt_d_w(f0, f0);
+        }
+        // ARM uses a workaround here because of the unaligned HeapNumber
+        // kValueOffset. On MIPS this workaround is built into sdc1 so
+        // there's no point in generating even more instructions.
+        __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in a2 to the heap number in v0, using
+        // a3 and a0 as scratch. v0 is preserved and returned.
+        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
+        __ TailCallStub(&stub);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
 }
 
 
@@ -351,83 +2560,929 @@
 // generated. If the result is not a smi and heap number allocation is not
 // requested, the code falls through. If number allocation is requested but a
 // heap number cannot be allocated, the code jumps to the label gc_required.
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+    MacroAssembler* masm,
+    Label* use_runtime,
     Label* gc_required,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
-  UNIMPLEMENTED_MIPS();
+  Label not_smis;
+
+  Register left = a1;
+  Register right = a0;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+
+  // Perform combined smi check on both operands.
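+  // Or-ing the operands preserves the tag bits, so the combined value is a
+  // smi only if both operands are smis.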
+  __ Or(scratch1, left, Operand(right));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfNotSmi(scratch1, &not_smis);
+
+  // If the smi-smi operation results in a smi, a return is generated.
+  GenerateSmiSmiOperation(masm);
+
+  // If heap number results are possible, generate the result in an
+  // allocated heap number.
+  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+    GenerateFPOperation(masm, true, use_runtime, gc_required);
+  }
+  __ bind(&not_smis);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  Label not_smis, call_runtime;
+
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
+    // Only allow smi results.
+    GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+  } else {
+    // Allow heap number result and don't make a transition if a heap number
+    // cannot be allocated.
+    GenerateSmiCode(masm,
+                    &call_runtime,
+                    &call_runtime,
+                    ALLOW_HEAPNUMBER_RESULTS);
+  }
+
+  // Code falls through if the result is not returned as either a smi or heap
+  // number.
+  GenerateTypeTransition(masm);
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::STRING);
+  // Try to add the arguments as strings; otherwise, transition to the
+  // generic BinaryOpIC type.
+  GenerateAddStrings(masm);
+  GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ GetObjectType(left, a2, a2);
+  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ GetObjectType(right, a2, a2);
+  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::INT32);
+
+  Register left = a1;
+  Register right = a0;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+  FPURegister double_scratch = f0;
+  FPURegister single_scratch = f6;
+
+  Register heap_number_result = no_reg;
+  Register heap_number_map = t2;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  Label call_runtime;
+  // Labels for type transition, used for wrong input or output types.
+  // Both labels are currently bound to the same position. We use two
+  // different labels to distinguish the causes leading to a type transition.
+  Label transition;
+
+  // Smi-smi fast case.
+  Label skip;
+  __ Or(scratch1, left, right);
+  __ JumpIfNotSmi(scratch1, &skip);
+  GenerateSmiSmiOperation(masm);
+  // Fall through if the result is not a smi.
+  __ bind(&skip);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load both operands and check that they are 32-bit integers.
+      // Jump to type transition if they are not. The registers a0 and a1
+      // (right and left) are preserved for the runtime call.
+      FloatingPointHelper::Destination destination =
+          CpuFeatures::IsSupported(FPU) &&
+          op_ != Token::MOD ?
+              FloatingPointHelper::kFPURegisters :
+              FloatingPointHelper::kCoreRegisters;
+
+      FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                   right,
+                                                   destination,
+                                                   f14,
+                                                   a2,
+                                                   a3,
+                                                   heap_number_map,
+                                                   scratch1,
+                                                   scratch2,
+                                                   f2,
+                                                   &transition);
+      FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                   left,
+                                                   destination,
+                                                   f12,
+                                                   t0,
+                                                   t1,
+                                                   heap_number_map,
+                                                   scratch1,
+                                                   scratch2,
+                                                   f2,
+                                                   &transition);
+
+      if (destination == FloatingPointHelper::kFPURegisters) {
+        CpuFeatures::Scope scope(FPU);
+        Label return_heap_number;
+        switch (op_) {
+          case Token::ADD:
+            __ add_d(f10, f12, f14);
+            break;
+          case Token::SUB:
+            __ sub_d(f10, f12, f14);
+            break;
+          case Token::MUL:
+            __ mul_d(f10, f12, f14);
+            break;
+          case Token::DIV:
+            __ div_d(f10, f12, f14);
+            break;
+          default:
+            UNREACHABLE();
+        }
+
+        if (op_ != Token::DIV) {
+          // These operations produce an integer result.
+          // Try to return a smi if we can.
+          // Otherwise return a heap number if allowed, or jump to type
+          // transition.
+
+          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+          // On MIPS a lot of things cannot be implemented the same way so right
+          // now it makes a lot more sense to just do things manually.
+
+          // Save FCSR.
+          __ cfc1(scratch1, FCSR);
+          // Disable FPU exceptions.
+          __ ctc1(zero_reg, FCSR);
+          __ trunc_w_d(single_scratch, f10);
+          // Retrieve FCSR.
+          __ cfc1(scratch2, FCSR);
+          // Restore FCSR.
+          __ ctc1(scratch1, FCSR);
+
+          // Check for inexact conversion.
+          __ srl(scratch2, scratch2, kFCSRFlagShift);
+          __ And(scratch2, scratch2, kFCSRFlagMask);
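+          // A non-zero flag field means the truncation was inexact (or
+          // raised another FP exception), i.e. f10 did not hold a value
+          // exactly representable as a 32-bit integer.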
+
+          if (result_type_ <= BinaryOpIC::INT32) {
+            // If scratch2 != 0, result does not fit in a 32-bit integer.
+            __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+          }
+
+          // Check if the result fits in a smi.
+          __ mfc1(scratch1, single_scratch);
+          __ Addu(scratch2, scratch1, Operand(0x40000000));
+          // If not, try to return a heap number.
+          __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
+          // Check for minus zero. Return heap number for minus zero.
+          Label not_zero;
+          __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
+          __ mfc1(scratch2, f11);
+          __ And(scratch2, scratch2, HeapNumber::kSignMask);
+          __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
+          __ bind(&not_zero);
+
+          // Tag the result and return.
+          __ SmiTag(v0, scratch1);
+          __ Ret();
+        } else {
+          // DIV just falls through to allocating a heap number.
+        }
+
+        if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+                                                 : BinaryOpIC::INT32)) {
+          __ bind(&return_heap_number);
+          // We are using FPU registers so s0 is available.
+          heap_number_result = s0;
+          GenerateHeapResultAllocation(masm,
+                                       heap_number_result,
+                                       heap_number_map,
+                                       scratch1,
+                                       scratch2,
+                                       &call_runtime);
+          __ mov(v0, heap_number_result);
+          __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
+          __ Ret();
+        }
+
+        // A DIV operation expecting an integer result falls through
+        // to type transition.
+
+      } else {
+        // We preserved a0 and a1 to be able to call runtime.
+        // Save the left value on the stack.
+        __ Push(t1, t0);
+
+        Label pop_and_call_runtime;
+
+        // Allocate a heap number to store the result.
+        heap_number_result = s0;
+        GenerateHeapResultAllocation(masm,
+                                     heap_number_result,
+                                     heap_number_map,
+                                     scratch1,
+                                     scratch2,
+                                     &pop_and_call_runtime);
+
+        // Load the left value from the value saved on the stack.
+        __ Pop(a1, a0);
+
+        // Call the C function to handle the double operation.
+        FloatingPointHelper::CallCCodeForDoubleOperation(
+            masm, op_, heap_number_result, scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
+
+        __ bind(&pop_and_call_runtime);
+        __ Drop(2);
+        __ Branch(&call_runtime);
+      }
+
+      break;
+    }
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      Label return_heap_number;
+      Register scratch3 = t1;
+      // Convert operands to 32-bit integers. Right in a2 and left in a3. The
+      // registers a0 and a1 (right and left) are preserved for the runtime
+      // call.
+      FloatingPointHelper::LoadNumberAsInt32(masm,
+                                             left,
+                                             a3,
+                                             heap_number_map,
+                                             scratch1,
+                                             scratch2,
+                                             scratch3,
+                                             f0,
+                                             &transition);
+      FloatingPointHelper::LoadNumberAsInt32(masm,
+                                             right,
+                                             a2,
+                                             heap_number_map,
+                                             scratch1,
+                                             scratch2,
+                                             scratch3,
+                                             f0,
+                                             &transition);
+
+      // The ECMA-262 standard specifies that, for shift operations, only the
+      // 5 least significant bits of the shift value should be used.
+      switch (op_) {
+        case Token::BIT_OR:
+          __ Or(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_XOR:
+          __ Xor(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_AND:
+          __ And(a2, a3, Operand(a2));
+          break;
+        case Token::SAR:
+          __ And(a2, a2, Operand(0x1f));
+          __ srav(a2, a3, a2);
+          break;
+        case Token::SHR:
+          __ And(a2, a2, Operand(0x1f));
+          __ srlv(a2, a3, a2);
+          // SHR is special because it is required to produce a positive
+          // answer. We only get a negative result if the shift value (a2) is
+          // zero; such a result cannot be represented as a signed 32-bit
+          // integer, so try to return a heap number if we can.
+          // The non-FPU code does not support this special case, so jump to
+          // the runtime if we don't support it.
+          if (CpuFeatures::IsSupported(FPU)) {
+            __ Branch((result_type_ <= BinaryOpIC::INT32)
+                        ? &transition
+                        : &return_heap_number,
+                       lt,
+                       a2,
+                       Operand(zero_reg));
+          } else {
+            __ Branch((result_type_ <= BinaryOpIC::INT32)
+                        ? &transition
+                        : &call_runtime,
+                       lt,
+                       a2,
+                       Operand(zero_reg));
+          }
+          break;
+        case Token::SHL:
+          __ And(a2, a2, Operand(0x1f));
+          __ sllv(a2, a3, a2);
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      // Check if the result fits in a smi.
+      __ Addu(scratch1, a2, Operand(0x40000000));
+      // If not, try to return a heap number. (We know the result is an int32.)
+      __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
+      // Tag the result and return.
+      __ SmiTag(v0, a2);
+      __ Ret();
+
+      __ bind(&return_heap_number);
+      heap_number_result = t1;
+      GenerateHeapResultAllocation(masm,
+                                   heap_number_result,
+                                   heap_number_map,
+                                   scratch1,
+                                   scratch2,
+                                   &call_runtime);
+
+      if (CpuFeatures::IsSupported(FPU)) {
+        CpuFeatures::Scope scope(FPU);
+
+        if (op_ != Token::SHR) {
+          // Convert the result to a floating point value.
+          __ mtc1(a2, double_scratch);
+          __ cvt_d_w(double_scratch, double_scratch);
+        } else {
+          // The result must be interpreted as an unsigned 32-bit integer.
+          __ mtc1(a2, double_scratch);
+          __ Cvt_d_uw(double_scratch, double_scratch);
+        }
+
+        // Store the result.
+        __ mov(v0, heap_number_result);
+        __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in a2 to the heap number in v0, using
+        // a3 and a1 as scratch. v0 is preserved and returned.
+        __ mov(a0, t1);
+        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+        __ TailCallStub(&stub);
+      }
+
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+
+  if (transition.is_linked()) {
+    __ bind(&transition);
+    GenerateTypeTransition(masm);
+  }
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (op_ == Token::ADD) {
+    // Handle string addition here, because it is the only operation
+    // that does not do a ToNumber conversion on the operands.
+    GenerateAddStrings(masm);
+  }
+
+  // Convert oddball arguments to numbers.
+  Label check, done;
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&check, ne, a1, Operand(t0));
+  if (Token::IsBitOp(op_)) {
+    __ li(a1, Operand(Smi::FromInt(0)));
+  } else {
+    __ LoadRoot(a1, Heap::kNanValueRootIndex);
+  }
+  __ jmp(&done);
+  __ bind(&check);
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&done, ne, a0, Operand(t0));
+  if (Token::IsBitOp(op_)) {
+    __ li(a0, Operand(Smi::FromInt(0)));
+  } else {
+    __ LoadRoot(a0, Heap::kNanValueRootIndex);
+  }
+  __ bind(&done);
+
+  GenerateHeapNumberStub(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  Label call_runtime;
+  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+  Label call_runtime, call_string_add_or_runtime;
+
+  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+
+  __ bind(&call_string_add_or_runtime);
+  if (op_ == Token::ADD) {
+    GenerateAddStrings(masm);
+  }
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+  ASSERT(op_ == Token::ADD);
+  Label left_not_string, call_runtime;
+
+  Register left = a1;
+  Register right = a0;
+
+  // Check if left argument is a string.
+  __ JumpIfSmi(left, &left_not_string);
+  __ GetObjectType(left, a2, a2);
+  __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_left_stub);
+
+  // Left operand is not a string, test right.
+  __ bind(&left_not_string);
+  __ JumpIfSmi(right, &call_runtime);
+  __ GetObjectType(right, a2, a2);
+  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_right_stub);
+
+  // At least one argument is not a string.
+  __ bind(&call_runtime);
+}
+
+
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+  GenerateRegisterArgsPush(masm);
+  switch (op_) {
+    case Token::ADD:
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void BinaryOpStub::GenerateHeapResultAllocation(
     MacroAssembler* masm,
     Register result,
     Register heap_number_map,
     Register scratch1,
     Register scratch2,
     Label* gc_required) {
-  UNIMPLEMENTED_MIPS();
+
+  // The code below clobbers result if allocation fails. To keep both
+  // arguments intact for the runtime call, result cannot be one of them.
+  ASSERT(!result.is(a0) && !result.is(a1));
+
+  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+    Label skip_allocation, allocated;
+    Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
+    // If the overwritable operand is already an object, we skip the
+    // allocation of a heap number.
+    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+    // Allocate a heap number for the result.
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+    __ Branch(&allocated);
+    __ bind(&skip_allocation);
+    // Use object holding the overwritable operand for result.
+    __ mov(result, overwritable_operand);
+    __ bind(&allocated);
+  } else {
+    ASSERT(mode_ == NO_OVERWRITE);
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+  }
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  __ Push(a1, a0);
 }
 
 
 
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Untagged case: double input in f4, double result goes
+  //   into f4.
+  // Tagged case: tagged input on top of stack and in a0,
+  //   tagged result (heap number) goes into v0.
+
+  Label input_not_smi;
+  Label loaded;
+  Label calculate;
+  Label invalid_cache;
+  const Register scratch0 = t5;
+  const Register scratch1 = t3;
+  const Register cache_entry = a0;
+  const bool tagged = (argument_type_ == TAGGED);
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    if (tagged) {
+      // Argument is a number and is on the stack and in a0.
+      // Load argument and check if it is a smi.
+      __ JumpIfNotSmi(a0, &input_not_smi);
+
+      // Input is a smi. Convert to double and load the low and high words
+      // of the double into a2, a3.
+      __ sra(t0, a0, kSmiTagSize);
+      __ mtc1(t0, f4);
+      __ cvt_d_w(f4, f4);
+      __ Move(a2, a3, f4);
+      __ Branch(&loaded);
+
+      __ bind(&input_not_smi);
+      // Check if input is a HeapNumber.
+      __ CheckMap(a0,
+                  a1,
+                  Heap::kHeapNumberMapRootIndex,
+                  &calculate,
+                  DONT_DO_SMI_CHECK);
+      // Input is a HeapNumber. Load the
+      // low and high words into a2, a3.
+      __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
+      __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
+    } else {
+      // Input is untagged double in f4. Output goes to f4.
+      __ Move(a2, a3, f4);
+    }
+    __ bind(&loaded);
+    // a2 = low 32 bits of double value.
+    // a3 = high 32 bits of double value.
+    // Compute hash (the shifts are arithmetic):
+    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+    __ Xor(a1, a2, a3);
+    __ sra(t0, a1, 16);
+    __ Xor(a1, a1, t0);
+    __ sra(t0, a1, 8);
+    __ Xor(a1, a1, t0);
+    ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+    __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
+
+    // a2 = low 32 bits of double value.
+    // a3 = high 32 bits of double value.
+    // a1 = TranscendentalCache::hash(double value).
+    __ li(cache_entry, Operand(
+        ExternalReference::transcendental_cache_array_address(
+            masm->isolate())));
+    // a0 points to cache array.
+    __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+        Isolate::Current()->transcendental_cache()->caches_[0])));
+    // a0 points to the cache for the type type_.
+    // If NULL, the cache hasn't been initialized yet, so go through runtime.
+    __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
+
+#ifdef DEBUG
+    // Check that the layout of cache elements matches expectations.
+    { TranscendentalCache::SubCache::Element test_elem[2];
+      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+      CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
+      CHECK_EQ(0, elem_in0 - elem_start);
+      CHECK_EQ(kIntSize, elem_in1 - elem_start);
+      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+    }
+#endif
+
+    // Find the address of entry a1 in the cache, i.e., &a0[a1 * 12].
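+    // a1 * 12 is computed as (a1 + a1 * 2) * 4, using two shifts and an add.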
+    __ sll(t0, a1, 1);
+    __ Addu(a1, a1, t0);
+    __ sll(t0, a1, 2);
+    __ Addu(cache_entry, cache_entry, t0);
+
+    // Check if the cache matches: the double value is stored in a
+    // uint32_t[2] array.
+    __ lw(t0, MemOperand(cache_entry, 0));
+    __ lw(t1, MemOperand(cache_entry, 4));
+    __ lw(t2, MemOperand(cache_entry, 8));
+    __ Addu(cache_entry, cache_entry, 12);
+    __ Branch(&calculate, ne, a2, Operand(t0));
+    __ Branch(&calculate, ne, a3, Operand(t1));
+    // Cache hit. Load result, clean up, and return.
+    if (tagged) {
+      // Pop input value from stack and load result into v0.
+      __ Drop(1);
+      __ mov(v0, t2);
+    } else {
+      // Load result into f4.
+      __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+    }
+    __ Ret();
+  }  // if (CpuFeatures::IsSupported(FPU))
+
+  __ bind(&calculate);
+  if (tagged) {
+    __ bind(&invalid_cache);
+    __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
+                                                   masm->isolate()),
+                                 1,
+                                 1);
+  } else {
+    if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
+    CpuFeatures::Scope scope(FPU);
+
+    Label no_update;
+    Label skip_cache;
+    const Register heap_number_map = t2;
+
+    // Call C function to calculate the result and update the cache.
+    // Register a0 holds precalculated cache entry address; preserve
+    // it on the stack and pop it into register cache_entry after the
+    // call.
+    __ push(cache_entry);
+    GenerateCallCFunction(masm, scratch0);
+    __ GetCFunctionDoubleResult(f4);
+
+    // Try to update the cache. If we cannot allocate a
+    // heap number, we return the result without updating.
+    __ pop(cache_entry);
+    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
+    __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+
+    __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
+    __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
+    __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
+
+    __ mov(v0, cache_entry);
+    __ Ret();
+
+    __ bind(&invalid_cache);
+    // The cache is invalid. Call the runtime function, which will recreate
+    // the cache.
+    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
+    __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
+    __ EnterInternalFrame();
+    __ push(a0);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
+    __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
+    __ Ret();
+
+    __ bind(&skip_cache);
+    // Call the C function to calculate the result and return the answer
+    // directly, without updating the cache.
+    GenerateCallCFunction(masm, scratch0);
+    __ GetCFunctionDoubleResult(f4);
+    __ bind(&no_update);
+
+    // We return the value in f4 without adding it to the cache, but
+    // we cause a scavenging GC so that future allocations will succeed.
+    __ EnterInternalFrame();
+
+    // Allocate an aligned object larger than a HeapNumber.
+    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+    __ li(scratch0, Operand(4 * kPointerSize));
+    __ push(scratch0);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
+    __ Ret();
+  }
+}
+
+
+void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
+                                                    Register scratch) {
+  __ push(ra);
+  __ PrepareCallCFunction(2, scratch);
+  if (IsMipsSoftFloatABI) {
+    __ Move(v0, v1, f4);
+  } else {
+    __ mov_d(f12, f4);
+  }
+  switch (type_) {
+    case TranscendentalCache::SIN:
+      __ CallCFunction(
+          ExternalReference::math_sin_double_function(masm->isolate()), 2);
+      break;
+    case TranscendentalCache::COS:
+      __ CallCFunction(
+          ExternalReference::math_cos_double_function(masm->isolate()), 2);
+      break;
+    case TranscendentalCache::LOG:
+      __ CallCFunction(
+          ExternalReference::math_log_double_function(masm->isolate()), 2);
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  __ pop(ra);
 }
 
 
 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
-  UNIMPLEMENTED_MIPS();
-  return Runtime::kAbort;
+  switch (type_) {
+    // Add more cases when necessary.
+    case TranscendentalCache::SIN: return Runtime::kMath_sin;
+    case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::LOG: return Runtime::kMath_log;
+    default:
+      UNIMPLEMENTED();
+      return Runtime::kAbort;
+  }
 }
 
 
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }
 
 
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void MathPowStub::Generate(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    Label base_not_smi;
+    Label exponent_not_smi;
+    Label convert_exponent;
+
+    const Register base = a0;
+    const Register exponent = a2;
+    const Register heapnumbermap = t1;
+    const Register heapnumber = s0;  // Callee-saved register.
+    const Register scratch = t2;
+    const Register scratch2 = t3;
+
+    // Allocate FP values in the ABI-parameter-passing regs.
+    const DoubleRegister double_base = f12;
+    const DoubleRegister double_exponent = f14;
+    const DoubleRegister double_result = f0;
+    const DoubleRegister double_scratch = f2;
+
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+    __ lw(base, MemOperand(sp, 1 * kPointerSize));
+    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
+
+    // Convert base to a double value and store it in double_base (f12).
+    __ JumpIfNotSmi(base, &base_not_smi);
+    // Base is a Smi. Untag and convert it.
+    __ SmiUntag(base);
+    __ mtc1(base, double_scratch);
+    __ cvt_d_w(double_base, double_scratch);
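+    // mtc1 moves the untagged integer into an FPU register; cvt_d_w then
+    // converts that 32-bit word to a double in double_base.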
+    __ Branch(&convert_exponent);
+
+    __ bind(&base_not_smi);
+    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+    // Base is a heapnumber. Load it into double register.
+    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+
+    __ bind(&convert_exponent);
+    __ JumpIfNotSmi(exponent, &exponent_not_smi);
+    __ SmiUntag(exponent);
+
+    // The base is in a double register and the exponent is
+    // an untagged smi. Allocate a heap number and call a
+    // C function for integer exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
+    __ push(ra);
+    __ PrepareCallCFunction(3, scratch);
+    __ SetCallCDoubleArguments(double_base, exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_int_function(masm->isolate()), 3);
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    __ mov(v0, heapnumber);
+    __ DropAndRet(2);  // Drop the two stack arguments (base and exponent).
+
+    __ bind(&exponent_not_smi);
+    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+    // Exponent is a heapnumber. Load it into double register.
+    __ ldc1(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
+
+    // The base and the exponent are in double registers.
+    // Allocate a heap number and call a C function for
+    // double exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
+    __ push(ra);
+    __ PrepareCallCFunction(4, scratch);
+    // ABI (o32) for func(double a, double b): a in f12, b in f14.
+    ASSERT(double_base.is(f12));
+    ASSERT(double_exponent.is(f14));
+    __ SetCallCDoubleArguments(double_base, double_exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(masm->isolate()), 4);
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    __ mov(v0, heapnumber);
+    __ DropAndRet(2);  // Drop the two stack arguments (base and exponent).
+  }
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 }
 
 
@@ -437,13 +3492,13 @@
 
 
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Throw(v0);
 }
 
 
 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
                                           UncatchableExceptionType type) {
-  UNIMPLEMENTED_MIPS();
+  __ ThrowUncatchable(type, v0);
 }
 
 
@@ -453,78 +3508,1427 @@
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
                               bool always_allocate) {
-  UNIMPLEMENTED_MIPS();
+  // v0: result parameter for PerformGC, if any
+  // s0: number of arguments including receiver (C callee-saved)
+  // s1: pointer to the first argument          (C callee-saved)
+  // s2: pointer to builtin function            (C callee-saved)
+
+  if (do_gc) {
+    // Move result passed in v0 into a0 to call PerformGC.
+    __ mov(a0, v0);
+    __ PrepareCallCFunction(1, a1);
+    __ CallCFunction(
+        ExternalReference::perform_gc_function(masm->isolate()), 1);
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+  if (always_allocate) {
+    __ li(a0, Operand(scope_depth));
+    __ lw(a1, MemOperand(a0));
+    __ Addu(a1, a1, Operand(1));
+    __ sw(a1, MemOperand(a0));
+  }
+
+  // Prepare arguments for C routine: a0 = argc, a1 = argv
+  __ mov(a0, s0);
+  __ mov(a1, s1);
+
+  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
+  // also need to reserve the 4 argument slots on the stack.
+
+  __ AssertStackIsAligned();
+
+  __ li(a2, Operand(ExternalReference::isolate_address()));
+
+  // From arm version of this function:
+  // TODO(1242173): To let the GC traverse the return address of the exit
+  // frames, we need to know where the return address is. Right now,
+  // we push it on the stack to be able to find it again, but we never
+  // restore from it in case of changes, which makes it impossible to
+  // support moving the C entry code stub. This should be fixed, but currently
+  // this is OK because the CEntryStub gets generated so early in the V8 boot
+  // sequence that it is not moving ever.
+
+  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+    // This branch-and-link sequence is needed to find the current PC on mips,
+    // saved to the ra register.
+    // Use masm-> here instead of the double-underscore macro since extra
+    // coverage code can interfere with the proper calculation of ra.
+    Label find_ra;
+    masm->bal(&find_ra);  // bal exposes branch delay slot.
+    masm->nop();  // Branch delay slot nop.
+    masm->bind(&find_ra);
+
+    // Adjust the value in ra to point to the correct return location, 2nd
+    // instruction past the real call into C code (the jalr(t9)), and push it.
+    // This is the return address of the exit frame.
+    const int kNumInstructionsToJump = 6;
+    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
+    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
+    masm->Subu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+    // Stack is still aligned.
+
+    // Call the C routine.
+    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
+    masm->jalr(t9);
+    masm->nop();    // Branch delay slot nop.
+    // Make sure the stored 'ra' points to this position.
+    ASSERT_EQ(kNumInstructionsToJump,
+              masm->InstructionsGeneratedSince(&find_ra));
+  }
+
+  // Restore stack (remove arg slots).
+  __ Addu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+
+  if (always_allocate) {
+    // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
+    __ li(a2, Operand(scope_depth));
+    __ lw(a3, MemOperand(a2));
+    __ Subu(a3, a3, Operand(1));
+    __ sw(a3, MemOperand(a2));
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
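+  // Failure objects have all of their low tag bits set, so adding 1 clears
+  // those bits; t0 == 0 below therefore means v0 holds a failure.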
+  __ addiu(a2, v0, 1);
+  __ andi(t0, a2, kFailureTagMask);
+  __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
+
+  // Exit C frame and return.
+  // v0:v1: result
+  // sp: stack pointer
+  // fp: frame pointer
+  __ LeaveExitFrame(save_doubles_, s0);
+  __ Ret();
+
+  // Check if we should retry or throw exception.
+  Label retry;
+  __ bind(&failure_returned);
+  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
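+  // The failure type field sits just above the tag bits; a zero type
+  // (RETRY_AFTER_GC) sends us to the retry path below.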
+  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
+  __ Branch(&retry, eq, t0, Operand(zero_reg));
+
+  // Special handling of out of memory exceptions.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ Branch(throw_out_of_memory_exception, eq,
+            v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+
+  // Retrieve the pending exception and clear the variable.
+  __ li(t0,
+        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(a3, MemOperand(t0));
+  __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                      masm->isolate())));
+  __ lw(v0, MemOperand(t0));
+  __ sw(a3, MemOperand(t0));
+
+  // Special handling of termination exceptions which are uncatchable
+  // by javascript code.
+  __ Branch(throw_termination_exception, eq,
+            v0, Operand(masm->isolate()->factory()->termination_exception()));
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  __ bind(&retry);
+  // The last failure (v0) will be moved to a0 as the parameter when retrying.
 }
 
 
 void CEntryStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Called from JavaScript; parameters are on stack as if calling JS function
+  // a0: number of arguments including receiver
+  // a1: pointer to builtin function
+  // fp: frame pointer    (restored after C call)
+  // sp: stack pointer    (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects
+  // instead of a proper result. The builtin entry handles
+  // this by performing a garbage collection and retrying the
+  // builtin once.
+
+  // Compute the argv pointer in a callee-saved register.
+  __ sll(s1, a0, kPointerSizeLog2);
+  __ Addu(s1, sp, s1);
+  __ Subu(s1, s1, Operand(kPointerSize));
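+  // s1 now holds sp + a0 * kPointerSize - kPointerSize, the address of the
+  // highest-addressed of the a0 argument slots.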
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(save_doubles_);
+
+  // Setup argc and the builtin function in callee-saved registers.
+  __ mov(s0, a0);
+  __ mov(s2, a1);
+
+  // s0: number of arguments (C callee-saved)
+  // s1: pointer to first argument (C callee-saved)
+  // s2: pointer to builtin function (C callee-saved)
+
+  Label throw_normal_exception;
+  Label throw_termination_exception;
+  Label throw_out_of_memory_exception;
+
+  // Call into the runtime system.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               false,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+  __ bind(&throw_termination_exception);
+  GenerateThrowUncatchable(masm, TERMINATION);
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
 }
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  UNIMPLEMENTED_MIPS();
+  Label invoke, exit;
+
+  // Registers:
+  // a0: entry address
+  // a1: function
+  // a2: receiver
+  // a3: argc
+  //
+  // Stack:
+  // 4 args slots
+  // args
+
+  // Save callee saved registers on the stack.
+  __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
+
+  // Load argv in s0 register.
+  __ lw(s0, MemOperand(sp, kNumCalleeSaved * kPointerSize +
+                           StandardFrameConstants::kCArgsSlotsSize));
+
+  // We build an EntryFrame.
+  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ li(t2, Operand(Smi::FromInt(marker)));
+  __ li(t1, Operand(Smi::FromInt(marker)));
+  __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+                                      masm->isolate())));
+  __ lw(t0, MemOperand(t0));
+  __ Push(t3, t2, t1, t0);
+  // Setup frame pointer for the frame to be pushed.
+  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // caller fp          |
+  // function slot      | entry frame
+  // context slot       |
+  // bad fp (0xff...f)  |
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  #ifdef ENABLE_LOGGING_AND_PROFILING
+    // If this is the outermost JS call, set js_entry_sp value.
+    Label non_outermost_js;
+    ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
+                                  masm->isolate());
+    __ li(t1, Operand(ExternalReference(js_entry_sp)));
+    __ lw(t2, MemOperand(t1));
+    __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
+    __ sw(fp, MemOperand(t1));
+    __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+    Label cont;
+    __ b(&cont);
+    __ nop();   // Branch delay slot nop.
+    __ bind(&non_outermost_js);
+    __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+    __ bind(&cont);
+    __ push(t0);
+  #endif
+
+  // Call a faked try-block that does the invoke.
+  __ bal(&invoke);  // bal exposes branch delay slot.
+  __ nop();   // Branch delay slot nop.
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  // Coming in here the fp will be invalid because the PushTryHandler below
+  // sets it to 0 to signal the existence of the JSEntry frame.
+  __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                      masm->isolate())));
+  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
+  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+  __ b(&exit);  // b exposes branch delay slot.
+  __ nop();   // Branch delay slot nop.
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the bal(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ li(t0,
+        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(t1, MemOperand(t0));
+  __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                      masm->isolate())));
+  __ sw(t1, MemOperand(t0));
+
+  // Invoke the function by calling through JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+                                      masm->isolate());
+    __ li(t0, Operand(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
+    __ li(t0, Operand(entry));
+  }
+  __ lw(t9, MemOperand(t0));  // Deref address.
+
+  // Call JSEntryTrampoline.
+  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+  __ Call(t9);
+
+  // Unlink this frame from the handler chain.
+  __ PopTryHandler();
+
+  __ bind(&exit);  // v0 holds result
+  #ifdef ENABLE_LOGGING_AND_PROFILING
+    // Check if the current stack frame is marked as the outermost JS frame.
+    Label non_outermost_js_2;
+    __ pop(t1);
+    __ Branch(&non_outermost_js_2, ne, t1,
+              Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+    __ li(t1, Operand(ExternalReference(js_entry_sp)));
+    __ sw(zero_reg, MemOperand(t1));
+    __ bind(&non_outermost_js_2);
+  #endif
+
+  // Restore the top frame descriptors from the stack.
+  __ pop(t1);
+  __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+                                      masm->isolate())));
+  __ sw(t1, MemOperand(t0));
+
+  // Reset the stack to the callee saved registers.
+  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Restore callee saved registers from the stack.
+  __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
+  // Return.
+  __ Jump(ra);
 }
 
 
-// Uses registers a0 to t0. Expected input is
-// object in a0 (or at sp+1*kPointerSize) and function in
-// a1 (or at sp), depending on whether or not
-// args_in_registers() is true.
+// Uses registers a0 to t0.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: a0 or at sp + 1 * kPointerSize.
+// * function: a1 or at sp.
+//
+// Inlined call site patching is a crankshaft-specific feature that is not
+// implemented on MIPS.
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // This is a crankshaft-specific feature that has not been implemented yet.
+  ASSERT(!HasCallSiteInlineCheck());
+  // Call site inlining and patching implies arguments in registers.
+  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+  // ReturnTrueFalse is only implemented for inlined call sites.
+  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
+  // Fixed register usage throughout the stub:
+  const Register object = a0;  // Object (lhs).
+  Register map = a3;  // Map of the object.
+  const Register function = a1;  // Function (rhs).
+  const Register prototype = t0;  // Prototype of the function.
+  const Register inline_site = t5;
+  const Register scratch = a2;
+
+  Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+  if (!HasArgsInRegisters()) {
+    __ lw(object, MemOperand(sp, 1 * kPointerSize));
+    __ lw(function, MemOperand(sp, 0));
+  }
+
+  // Check that the left hand is a JS object and load map.
+  __ JumpIfSmi(object, &not_js_object);
+  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+  // If there is a call site cache don't look in the global cache, but do the
+  // real lookup and update the call site cache.
+  if (!HasCallSiteInlineCheck()) {
+    Label miss;
+    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(t1));
+    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(t1));
+    __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+    __ bind(&miss);
+  }
+
+  // Get the prototype of the function.
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+
+  // Check that the function prototype is a JS object.
+  __ JumpIfSmi(prototype, &slow);
+  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+  // Update the global instanceof or call site inlined cache with the current
+  // map and function. The cached answer will be set when it is known below.
+  if (!HasCallSiteInlineCheck()) {
+    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  // Register mapping: a3 is object map and t0 is function prototype.
+  // Get prototype of object into a2.
+  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+  // We don't need map any more. Use it as a scratch register.
+  Register scratch2 = map;
+  map = no_reg;
+
+  // Loop through the prototype chain looking for the function prototype.
+  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+  __ bind(&loop);
+  __ Branch(&is_instance, eq, scratch, Operand(prototype));
+  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
+  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+  __ Branch(&loop);
+
+  __ bind(&is_instance);
+  ASSERT(Smi::FromInt(0) == 0);
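+  // Answer encoding: smi zero means "is an instance", a non-zero smi means
+  // "is not an instance".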
+  if (!HasCallSiteInlineCheck()) {
+    __ mov(v0, zero_reg);
+    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&is_not_instance);
+  if (!HasCallSiteInlineCheck()) {
+    __ li(v0, Operand(Smi::FromInt(1)));
+    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  Label object_not_null, object_not_null_or_smi;
+  __ bind(&not_js_object);
+  // Before null, smi and string value checks, check that the rhs is a function
+  // as for a non-function rhs an exception needs to be thrown.
+  __ JumpIfSmi(function, &slow);
+  __ GetObjectType(function, scratch2, scratch);
+  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Null is not instance of anything.
+  __ Branch(&object_not_null, ne, scratch,
+      Operand(masm->isolate()->factory()->null_value()));
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null);
+  // Smi values are not instances of anything.
+  __ JumpIfNotSmi(object, &object_not_null_or_smi);
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null_or_smi);
+  // String values are not instances of anything.
+  __ IsObjectJSStringType(object, scratch, &slow);
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  // Slow-case.  Tail call builtin.
+  __ bind(&slow);
+  if (!ReturnTrueFalseObject()) {
+    if (HasArgsInRegisters()) {
+      __ Push(a0, a1);
+    }
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+  } else {
+    __ EnterInternalFrame();
+    __ Push(a0, a1);
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    __ LeaveInternalFrame();
+    __ mov(a0, v0);
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+  }
 }
 
 
+Register InstanceofStub::left() { return a0; }
+
+
+Register InstanceofStub::right() { return a1; }
+
+
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // The displacement is the offset of the last parameter (if any)
+  // relative to the frame pointer.
+  static const int kDisplacement =
+      StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+  // Check that the key is a smi.
+  Label slow;
+  __ JumpIfNotSmi(a1, &slow);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor,
+            eq,
+            a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Check index (a1) against formal parameters count limit passed in
+  // through register a0. Use unsigned comparison to get negative
+  // check for free.
+  __ Branch(&slow, hs, a1, Operand(a0));
+
+  // Read the argument from the stack and return it.
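+  // a3 = a0 - a1 is a smi (both operands are smis); shifting it left by
+  // kPointerSizeLog2 - kSmiTagSize untags it and scales it to a byte offset
+  // in a single instruction.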
+  __ subu(a3, a0, a1);
+  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, fp, Operand(t3));
+  __ lw(v0, MemOperand(a3, kDisplacement));
+  __ Ret();
+
+  // Arguments adaptor case: Check index (a1) against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
+
+  // Read the argument from the adaptor frame and return it.
+  __ subu(a3, a0, a1);
+  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, a2, Operand(t3));
+  __ lw(v0, MemOperand(a3, kDisplacement));
+  __ Ret();
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ push(a1);
+  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
 }
 
 
 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // sp[0] : number of parameters
+  // sp[4] : receiver displacement
+  // sp[8] : function
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor_frame,
+            eq,
+            a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Get the length from the frame.
+  __ lw(a1, MemOperand(sp, 0));
+  __ Branch(&try_allocate);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ sw(a1, MemOperand(sp, 0));
+  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, a2, Operand(at));
+
+  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+  // Try the new space allocation. Start out with computing the size
+  // of the arguments object and the elements array in words.
+  Label add_arguments_object;
+  __ bind(&try_allocate);
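+  // A zero-length arguments object needs no elements array; otherwise the
+  // size is the untagged length plus the FixedArray header, in words. The
+  // arguments object itself is added on top in either case.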
+  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
+  __ srl(a1, a1, kSmiTagSize);
+
+  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+  __ bind(&add_arguments_object);
+  __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize));
+
+  // Do the allocation of both objects in one go.
+  __ AllocateInNewSpace(
+      a1,
+      v0,
+      a2,
+      a3,
+      &runtime,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+  // Get the arguments boilerplate from the current (global) context.
+  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+  __ lw(t0, MemOperand(t0,
+                       Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+
+  // Copy the JS object part.
+  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
+
+  if (type_ == NEW_NON_STRICT) {
+    // Setup the callee in-object property.
+    STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+    __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+    const int kCalleeOffset = JSObject::kHeaderSize +
+                              Heap::kArgumentsCalleeIndex * kPointerSize;
+    __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
+  }
+
+  // Get the length (smi tagged) and set that as an in-object property too.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+  __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
+                                Heap::kArgumentsLengthIndex * kPointerSize));
+
+  Label done;
+  __ Branch(&done, eq, a1, Operand(zero_reg));
+
+  // Get the parameters pointer from the stack.
+  __ lw(a2, MemOperand(sp, 1 * kPointerSize));
+
+  // Setup the elements pointer in the allocated arguments object and
+  // initialize the header in the elements fixed array.
+  __ Addu(t0, v0, Operand(GetArgumentsObjectSize()));
+  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
+  __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
+  __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  __ srl(a1, a1, kSmiTagSize);  // Untag the length for the loop.
+
+  // Copy the fixed array slots.
+  Label loop;
+  // Setup t0 to point to the first array slot.
+  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ bind(&loop);
+  // Pre-decrement a2 with kPointerSize on each iteration.
+  // Pre-decrement in order to skip receiver.
+  __ Addu(a2, a2, Operand(-kPointerSize));
+  __ lw(a3, MemOperand(a2));
+  // Post-increment t0 with kPointerSize on each iteration.
+  __ sw(a3, MemOperand(t0));
+  __ Addu(t0, t0, Operand(kPointerSize));
+  __ Subu(a1, a1, Operand(1));
+  __ Branch(&loop, ne, a1, Operand(zero_reg));
+
+  // Return and remove the on-stack parameters.
+  __ bind(&done);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
 }
 
 
 void RegExpExecStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Just jump directly to runtime if native RegExp is not selected at compile
+  // time, or if the regexp entry in generated code is turned off by a runtime
+  // switch.
+#ifdef V8_INTERPRETED_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else  // V8_INTERPRETED_REGEXP
+  if (!FLAG_regexp_entry_native) {
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+    return;
+  }
+
+  // Stack frame on entry.
+  //  sp[0]: last_match_info (expected JSArray)
+  //  sp[4]: previous index
+  //  sp[8]: subject string
+  //  sp[12]: JSRegExp object
+
+  static const int kLastMatchInfoOffset = 0 * kPointerSize;
+  static const int kPreviousIndexOffset = 1 * kPointerSize;
+  static const int kSubjectOffset = 2 * kPointerSize;
+  static const int kJSRegExpOffset = 3 * kPointerSize;
+
+  Label runtime, invoke_regexp;
+
+  // Allocation of registers for this function. These are in callee-saved
+  // registers and will be preserved by the call to the native RegExp code, as
+  // this code is called using the normal C calling convention. When calling
+  // directly from generated code the native RegExp code will not do a GC and
+  // therefore the contents of these registers are safe to use after the call.
+  // MIPS - using s0..s2, since we are not using the CEntryStub.
+  Register subject = s0;
+  Register regexp_data = s1;
+  Register last_match_info_elements = s2;
+
+  // Ensure that a RegExp stack is allocated.
+  ExternalReference address_of_regexp_stack_memory_address =
+      ExternalReference::address_of_regexp_stack_memory_address(
+          masm->isolate());
+  ExternalReference address_of_regexp_stack_memory_size =
+      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+  __ li(a0, Operand(address_of_regexp_stack_memory_size));
+  __ lw(a0, MemOperand(a0, 0));
+  __ Branch(&runtime, eq, a0, Operand(zero_reg));
+
+  // Check that the first argument is a JSRegExp object.
+  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfSmi(a0, &runtime);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
+
+  // Check that the RegExp has been compiled (data contains a fixed array).
+  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
+  if (FLAG_debug_code) {
+    __ And(t0, regexp_data, Operand(kSmiTagMask));
+    __ Check(nz,
+             "Unexpected type for RegExp data, FixedArray expected",
+             t0,
+             Operand(zero_reg));
+    __ GetObjectType(regexp_data, a0, a0);
+    __ Check(eq,
+             "Unexpected type for RegExp data, FixedArray expected",
+             a0,
+             Operand(FIXED_ARRAY_TYPE));
+  }
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the number of captures fit in the static offsets vector buffer.
+  __ lw(a2,
+         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2. This
+  // uses the assumption that smis are 2 * their untagged value.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+  __ Addu(a2, a2, Operand(2));  // a2 was a smi.
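+  // E.g. number_of_captures == 1 is smi-encoded as 2, and 2 + 2 == 4 ==
+  // (1 + 1) * 2 capture registers, so no untagging is needed.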
+  // Check that the static offsets vector buffer is large enough.
+  __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+
+  // a2: Number of capture registers
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the second argument is a string.
+  __ lw(subject, MemOperand(sp, kSubjectOffset));
+  __ JumpIfSmi(subject, &runtime);
+  __ GetObjectType(subject, a0, a0);
+  __ And(a0, a0, Operand(kIsNotStringMask));
+  STATIC_ASSERT(kStringTag == 0);
+  __ Branch(&runtime, ne, a0, Operand(zero_reg));
+
+  // Get the length of the string into a3.
+  __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
+
+  // a2: Number of capture registers
+  // a3: Length of subject string as a smi
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the third argument is a positive smi less than the subject
+  // string length. A negative value will be greater (unsigned comparison).
+  __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
+  __ And(at, a0, Operand(kSmiTagMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ Branch(&runtime, ls, a3, Operand(a0));
+
+  // a2: Number of capture registers
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the fourth object is a JSArray object.
+  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(a0, &runtime);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
+  // Check that the JSArray is in fast case.
+  __ lw(last_match_info_elements,
+         FieldMemOperand(a0, JSArray::kElementsOffset));
+  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+  __ Branch(&runtime, ne, a0, Operand(
+      masm->isolate()->factory()->fixed_array_map()));
+  // Check that the last match info has space for the capture registers and the
+  // additional information.
+  __ lw(a0,
+         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+  __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
+  __ sra(at, a0, kSmiTagSize);  // Untag length for comparison.
+  __ Branch(&runtime, gt, a2, Operand(at));
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check the representation and encoding of the subject string.
+  Label seq_string;
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  // First check for flat string.
+  __ And(at, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+  __ Branch(&seq_string, eq, at, Operand(zero_reg));
+
+  // subject: Subject string
+  // a0: instance type of Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check for flat cons string.
+  // A flat cons string is a cons string where the second part is the empty
+  // string. In that case the subject string is just the first part of the cons
+  // string. Also in this case the first part of the cons string is known to be
+  // a sequential string or an external string.
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+  __ And(at, a0, Operand(kIsNotStringMask | kExternalStringTag));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
+  __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
+  __ Branch(&runtime, ne, a0, Operand(a1));
+  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  // Is first part a flat string?
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(at, a0, Operand(kStringRepresentationMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+  __ bind(&seq_string);
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // a0: Instance type of subject string
+  STATIC_ASSERT(kStringEncodingMask == 4);
+  STATIC_ASSERT(kAsciiStringTag == 4);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  // Find the code object based on the assumptions above.
+  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ascii.
+  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+  __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (used below).
+  __ lw(t0, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+  __ movz(t9, t0, a0);  // If UC16 (a0 is 0), replace t9 with the UC16 code.
+
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object otherwise it
+  // contains the hole.
+  __ GetObjectType(t9, a0, a0);
+  __ Branch(&runtime, ne, a0, Operand(CODE_TYPE));
+
+  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // t9: code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Load used arguments before starting to push arguments for call to native
+  // RegExp code to avoid handling changing stack height.
+  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
+  __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.
+
+  // a1: previous index
+  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // t9: code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // All checks done. Now push arguments for native regexp code.
+  __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+                      1, a0, a2);
+
+  // Isolates: note we add an additional parameter here (isolate pointer).
+  static const int kRegExpExecuteArguments = 8;
+  static const int kParameterRegisters = 4;
+  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+  // Stack pointer now points to cell where return address is to be written.
+  // Arguments are before that on the stack or in registers, meaning we
+  // treat the return address as argument 5. Thus every argument after that
+  // needs to be shifted back by 1. Since DirectCEntryStub will handle
+  // allocating space for the c argument slots, we don't need to calculate
+  // that into the argument positions on the stack. This is how the stack will
+  // look (sp meaning the value of sp at this moment):
+  // [sp + 4] - Argument 8
+  // [sp + 3] - Argument 7
+  // [sp + 2] - Argument 6
+  // [sp + 1] - Argument 5
+  // [sp + 0] - saved ra
+
+  // Argument 8: Pass current isolate address.
+  // CFunctionArgumentOperand handles MIPS stack argument slots.
+  __ li(a0, Operand(ExternalReference::isolate_address()));
+  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+
+  // Argument 7: Indicate that this is a direct call from JavaScript.
+  __ li(a0, Operand(1));
+  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+
+  // Argument 6: Start (high end) of backtracking stack memory area.
+  __ li(a0, Operand(address_of_regexp_stack_memory_address));
+  __ lw(a0, MemOperand(a0, 0));
+  __ li(a2, Operand(address_of_regexp_stack_memory_size));
+  __ lw(a2, MemOperand(a2, 0));
+  __ addu(a0, a0, a2);
+  __ sw(a0, MemOperand(sp, 2 * kPointerSize));
+
+  // Argument 5: static offsets vector buffer.
+  __ li(a0, Operand(
+        ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+  __ sw(a0, MemOperand(sp, 1 * kPointerSize));
+
+  // For arguments 4 and 3 get string length, calculate start of string data
+  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+  __ lw(a0, FieldMemOperand(subject, String::kLengthOffset));
+  __ sra(a0, a0, kSmiTagSize);
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ Addu(t0, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
+  // Argument 4 (a3): End of string data
+  // Argument 3 (a2): Start of string data
+  __ sllv(t1, a1, a3);
+  __ addu(a2, t0, t1);
+  __ sllv(t1, a0, a3);
+  __ addu(a3, t0, t1);
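+  // After the Xor above a3 is 0 for ASCII and 1 for two-byte strings, so
+  // sllv scales the index and the length to byte offsets for either
+  // encoding (1 or 2 bytes per character).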
+
+  // Argument 2 (a1): Previous index.
+  // Already there.
+
+  // Argument 1 (a0): Subject string.
+  __ mov(a0, subject);
+
+  // Locate the code entry and call it.
+  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+  DirectCEntryStub stub;
+  stub.GenerateCall(masm, t9);
+
+  __ LeaveExitFrame(false, no_reg);
+
+  // v0: result
+  // subject: subject string (callee saved)
+  // regexp_data: RegExp data (callee saved)
+  // last_match_info_elements: Last match info elements (callee saved)
+
+  // Check the result.
+
+  Label success;
+  __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  Label failure;
+  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  // If not an exception, it can only be a retry.
+  // Handle that in the runtime system.
+  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  // The result must now be an exception. If there is no pending exception
+  // already, a stack overflow (on the backtrack stack) was detected in the
+  // RegExp code, but the exception has not been created yet. Handle that in
+  // the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+  __ li(a1, Operand(
+      ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(a1, MemOperand(a1, 0));
+  __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                      masm->isolate())));
+  __ lw(v0, MemOperand(a2, 0));
+  __ Branch(&runtime, eq, v0, Operand(a1));
+
+  __ sw(a1, MemOperand(a2, 0));  // Clear pending exception.
+
+  // Check if the exception is a termination. If so, throw as uncatchable.
+  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
+  Label termination_exception;
+  __ Branch(&termination_exception, eq, v0, Operand(a0));
+
+  __ Throw(v0);  // Expects thrown value in v0.
+
+  __ bind(&termination_exception);
+  __ ThrowUncatchable(TERMINATION, v0);  // Expects thrown value in v0.
+
+  __ bind(&failure);
+  // For failure and exception return null.
+  __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+  __ Addu(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Process the result from the native regexp code.
+  __ bind(&success);
+  __ lw(a1,
+         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+  __ Addu(a1, a1, Operand(2));  // a1 was a smi.
+
+  // a1: number of capture registers
+  // subject: subject string
+  // Store the capture count.
+  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
+  __ sw(a2, FieldMemOperand(last_match_info_elements,
+                             RegExpImpl::kLastCaptureCountOffset));
+  // Store last subject and last input.
+  __ mov(a3, last_match_info_elements);  // Moved up to reduce latency.
+  __ sw(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastSubjectOffset));
+  __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+  __ sw(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastInputOffset));
+  __ mov(a3, last_match_info_elements);
+  __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+
+  // Get the static offsets vector filled by the native regexp code.
+  ExternalReference address_of_static_offsets_vector =
+      ExternalReference::address_of_static_offsets_vector(masm->isolate());
+  __ li(a2, Operand(address_of_static_offsets_vector));
+
+  // a1: number of capture registers
+  // a2: offsets vector
+  Label next_capture, done;
+  // Capture register counter starts from number of capture registers and
+  // counts down until wrapping after zero.
+  __ Addu(a0,
+         last_match_info_elements,
+         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+  __ bind(&next_capture);
+  __ Subu(a1, a1, Operand(1));
+  __ Branch(&done, lt, a1, Operand(zero_reg));
+  // Read the value from the static offsets vector buffer.
+  __ lw(a3, MemOperand(a2, 0));
+  __ addiu(a2, a2, kPointerSize);
+  // Store the smi value in the last match info.
+  __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
+  __ sw(a3, MemOperand(a0, 0));
+  __ Branch(&next_capture, USE_DELAY_SLOT);
+  __ addiu(a0, a0, kPointerSize);   // In branch delay slot.
+
+  __ bind(&done);
+
+  // Return last match info.
+  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
+  __ Addu(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to execute the regexp.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif  // V8_INTERPRETED_REGEXP
 }
 
 
 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  Label done;
+  __ lw(a1, MemOperand(sp, kPointerSize * 2));
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ JumpIfNotSmi(a1, &slowcase);
+  __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
+  // Smi-tagging is equivalent to multiplying by 2.
+  // Allocate RegExpResult followed by FixedArray with size in a2.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  // Size of JSArray with two in-object properties and the header of a
+  // FixedArray.
+  int objects_size =
+      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
+  __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
+  __ Addu(a2, t1, Operand(objects_size));
+  __ AllocateInNewSpace(
+      a2,  // In: Size, in words.
+      v0,  // Out: Start of allocation (tagged).
+      a3,  // Scratch register.
+      t0,  // Scratch register.
+      &slowcase,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+  // v0: Start of allocated area, object-tagged.
+  // a1: Number of elements in array, as smi.
+  // t1: Number of elements, untagged.
+
+  // Set JSArray map to global.regexp_result_map().
+  // Set empty properties FixedArray.
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  // Interleave operations for better latency.
+  __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
+  __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
+  __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  // Set input, index and length fields from arguments.
+  __ lw(a1, MemOperand(sp, kPointerSize * 0));
+  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
+  __ lw(a1, MemOperand(sp, kPointerSize * 1));
+  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+  __ lw(a1, MemOperand(sp, kPointerSize * 2));
+  __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
+
+  // Fill out the elements FixedArray.
+  // v0: JSArray, tagged.
+  // a3: FixedArray, tagged.
+  // t1: Number of elements in array, untagged.
+
+  // Set map.
+  __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+  // Set FixedArray length.
+  __ sll(t2, t1, kSmiTagSize);
+  __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+  // Fill contents of fixed-array with the-hole.
+  __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
+  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // Fill fixed array elements with hole.
+  // v0: JSArray, tagged.
+  // a2: the hole.
+  // a3: Start of elements in FixedArray.
+  // t1: Number of elements to fill.
+  Label loop;
+  __ sll(t1, t1, kPointerSizeLog2);  // Convert num elements to num bytes.
+  __ addu(t1, t1, a3);  // Point past last element to store.
+  __ bind(&loop);
+  __ Branch(&done, ge, a3, Operand(t1));  // Break when a3 past end of elem.
+  __ sw(a2, MemOperand(a3));
+  __ Branch(&loop, USE_DELAY_SLOT);
+  __ addiu(a3, a3, kPointerSize);  // In branch delay slot.
+
+  __ bind(&done);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
 }
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label slow;
+
+  // The receiver might implicitly be the global object. This is
+  // indicated by passing the hole as the receiver to the call
+  // function stub.
+  if (ReceiverMightBeImplicit()) {
+    Label call;
+    // Get the receiver from the stack.
+    // function, receiver [, arguments]
+    __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
+    // Call as function is indicated with the hole.
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&call, ne, t0, Operand(at));
+    // Patch the receiver on the stack with the global receiver object.
+    __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+    __ bind(&call);
+  }
+
+  // Get the function to call from the stack.
+  // function, receiver [, arguments]
+  __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+  // Check that the function is really a JavaScript function.
+  // a1: pushed function (to be verified)
+  __ JumpIfSmi(a1, &slow);
+  // Get the map of the function object.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // Fast-case: Invoke the function now.
+  // a1: pushed function
+  ParameterCount actual(argc_);
+
+  if (ReceiverMightBeImplicit()) {
+    Label call_as_function;
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&call_as_function, eq, t0, Operand(at));
+    __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+    __ bind(&call_as_function);
+  }
+  __ InvokeFunction(a1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    CALL_AS_FUNCTION);
+
+  // Slow-case: Non-function called.
+  __ bind(&slow);
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+  __ li(a0, Operand(argc_));  // Setup the number of arguments.
+  __ mov(a2, zero_reg);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
 }
 
 
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
 const char* CompareStub::GetName() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+         (lhs_.is(a1) && rhs_.is(a0)));
+
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
+  switch (cc_) {
+    case lt: cc_name = "LT"; break;
+    case gt: cc_name = "GT"; break;
+    case le: cc_name = "LE"; break;
+    case ge: cc_name = "GE"; break;
+    case eq: cc_name = "EQ"; break;
+    case ne: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
+  }
+
+  const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1";
+  const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1";
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == eq || cc_ == ne)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  const char* include_smi_compare_name = "";
+  if (!include_smi_compare_) {
+    include_smi_compare_name = "_NO_SMI";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s%s%s",
+               cc_name,
+               lhs_name,
+               rhs_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name,
+               include_smi_compare_name);
   return name_;
 }
 
 
 int CompareStub::MinorKey() {
-  UNIMPLEMENTED_MIPS();
-  return 0;
+  // Encode the parameters in a unique 16 bit value.
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+         (lhs_.is(a1) && rhs_.is(a0)));
+  return ConditionField::encode(static_cast<unsigned>(cc_))
+         | RegisterField::encode(lhs_.is(a0))
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+         | IncludeSmiCompareField::encode(include_smi_compare_);
 }
 
 
-// StringCharCodeAtGenerator
-
+// StringCharCodeAtGenerator.
+
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label flat_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  ASSERT(!t0.is(scratch_));
+  ASSERT(!t0.is(index_));
+  ASSERT(!t0.is(result_));
+  ASSERT(!t0.is(object_));
+
+  // If the receiver is a smi, trigger the non-string case.
+  __ JumpIfSmi(object_, receiver_not_string_);
+
+  // Fetch the instance type of the receiver into result register.
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // If the receiver is not a string, trigger the non-string case.
+  __ And(t0, result_, Operand(kIsNotStringMask));
+  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
+
+  // If the index is not a smi, trigger the non-smi case.
+  __ JumpIfNotSmi(index_, &index_not_smi_);
+
+  // Put smi-tagged index into scratch register.
+  __ mov(scratch_, index_);
+  __ bind(&got_smi_index_);
+
+  // Check for index out of range.
+  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
+  __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+
+  // We need special handling for non-flat strings.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(t0, result_, Operand(kStringRepresentationMask));
+  __ Branch(&flat_string, eq, t0, Operand(zero_reg));
+
+  // Handle non-flat strings.
+  __ And(t0, result_, Operand(kIsConsStringMask));
+  __ Branch(&call_runtime_, eq, t0, Operand(zero_reg));
+
+  // ConsString.
+  // Check whether the right-hand side is the empty string (i.e. if this is
+  // really a flat string in a cons string). If that is not the case, we
+  // would rather go to the runtime system now to flatten the string.
+  __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+  __ Branch(&call_runtime_, ne, result_, Operand(t0));
+
+  // Get the first of the two strings and load its instance type.
+  __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // If the first cons component is also non-flat, then go to runtime.
+  STATIC_ASSERT(kSeqStringTag == 0);
+
+  __ And(t0, result_, Operand(kStringRepresentationMask));
+  __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
+
+  // Check for 1-byte or 2-byte string.
+  __ bind(&flat_string);
+  STATIC_ASSERT(kAsciiStringTag != 0);
+  __ And(t0, result_, Operand(kStringEncodingMask));
+  __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
+
+  // 2-byte string.
+  // Load the 2-byte character code into the result register. We can
+  // add without shifting since the smi tag size is the log2 of the
+  // number of bytes in a two-byte character.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+  __ Addu(scratch_, object_, Operand(scratch_));
+  __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+  __ Branch(&got_char_code);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+
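+  // Untag the smi index to get the byte offset of the 1-byte character.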
+  __ srl(t0, scratch_, kSmiTagSize);
+  __ Addu(scratch_, object_, t0);
+
+  __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+  __ bind(&got_char_code);
+  __ sll(result_, result_, kSmiTagSize);
+  __ bind(&exit_);
 }
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+  // Index is not a smi.
+  __ bind(&index_not_smi_);
+  // If index is a heap number, try converting it to an integer.
+  __ CheckMap(index_,
+              scratch_,
+              Heap::kHeapNumberMapRootIndex,
+              index_not_number_,
+              DONT_DO_SMI_CHECK);
+  call_helper.BeforeCall(masm);
+  // Consumed by runtime conversion function:
+  __ Push(object_, index_, index_);
+  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+  } else {
+    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    // NumberToSmi discards numbers that are not exact integers.
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
+  }
+
+  // Save the conversion result before the pop instructions below
+  // have a chance to overwrite it.
+  __ Move(scratch_, v0);
+  __ pop(index_);
+  __ pop(object_);
+  // Reload the instance type.
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  call_helper.AfterCall(masm);
+  // If index is still not a smi, it must be out of range.
+  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  // Otherwise, return to the fast path.
+  __ Branch(&got_smi_index_);
+
+  // Call runtime. We get here when the receiver is a string and the
+  // index is a number, but the code of getting the actual character
+  // is too complex (e.g., when the string needs to be flattened).
+  __ bind(&call_runtime_);
+  call_helper.BeforeCall(masm);
+  __ Push(object_, index_);
+  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+  __ Move(result_, v0);
+  call_helper.AfterCall(masm);
+  __ jmp(&exit_);
+
+  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
 }
 
 
@@ -532,13 +4936,46 @@
 // StringCharFromCodeGenerator
 
 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+
+  ASSERT(!t0.is(result_));
+  ASSERT(!t0.is(code_));
+
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiShiftSize == 0);
+  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+  __ And(t0,
+         code_,
+         Operand(kSmiTagMask |
+                 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
+
+  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+  // At this point code register contains smi tagged ASCII char code.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(result_, result_, t0);
+  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case_, eq, result_, Operand(t0));
+  __ bind(&exit_);
 }
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+  __ bind(&slow_case_);
+  call_helper.BeforeCall(masm);
+  __ push(code_);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+  __ Move(result_, v0);
+  call_helper.AfterCall(masm);
+  __ Branch(&exit_);
+
+  __ Abort("Unexpected fallthrough from CharFromCode slow case");
 }
 
 
@@ -546,13 +4983,15 @@
 // StringCharAtGenerator
 
 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
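+  // CharAt is CharCodeAt followed by CharFromCode: the char code produced
+  // by the first generator feeds the second through their shared register.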
+  char_code_at_generator_.GenerateFast(masm);
+  char_from_code_generator_.GenerateFast(masm);
 }
 
 
 void StringCharAtGenerator::GenerateSlow(
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  UNIMPLEMENTED_MIPS();
+  char_code_at_generator_.GenerateSlow(masm, call_helper);
+  char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
@@ -626,7 +5065,24 @@
                                           Register count,
                                           Register scratch,
                                           bool ascii) {
-  UNIMPLEMENTED_MIPS();
+  Label loop;
+  Label done;
+  // This loop just copies one character at a time, as it is only used for
+  // very short strings.
+  if (!ascii) {
+    __ addu(count, count, count);
+  }
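+  // 'count' is now a byte count for either encoding.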
+  __ Branch(&done, eq, count, Operand(zero_reg));
+  __ addu(count, dest, count);  // 'count' is one past the last dest byte.
+
+  __ bind(&loop);
+  __ lbu(scratch, MemOperand(src));
+  __ addiu(src, src, 1);
+  __ sb(scratch, MemOperand(dest));
+  __ addiu(dest, dest, 1);
+  __ Branch(&loop, lt, dest, Operand(count));
+
+  __ bind(&done);
 }
 
 
@@ -646,7 +5102,105 @@
                                               Register scratch4,
                                               Register scratch5,
                                               int flags) {
-  UNIMPLEMENTED_MIPS();
+  bool ascii = (flags & COPY_ASCII) != 0;
+  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+  if (dest_always_aligned && FLAG_debug_code) {
+    // Check that destination is actually word aligned if the flag says
+    // that it is.
+    __ And(scratch4, dest, Operand(kPointerAlignmentMask));
+    __ Check(eq,
+             "Destination of copy not aligned.",
+             scratch4,
+             Operand(zero_reg));
+  }
+
+  const int kReadAlignment = 4;
+  const int kReadAlignmentMask = kReadAlignment - 1;
+  // Ensure that reading an entire aligned word containing the last character
+  // of a string will not read outside the allocated area (because we pad up
+  // to kObjectAlignment).
+  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
+  // Assumes word reads and writes are little endian.
+  // Nothing to do for zero characters.
+  Label done;
+
+  if (!ascii) {
+    __ addu(count, count, count);
+  }
+  __ Branch(&done, eq, count, Operand(zero_reg));
+
+  Label byte_loop;
+  // Must copy at least eight bytes, otherwise just do it one byte at a time.
+  __ Subu(scratch1, count, Operand(8));
+  __ Addu(count, dest, Operand(count));
+  Register limit = count;  // Copy until dest equals this.
+  __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
+
+  if (!dest_always_aligned) {
+    // Align dest by byte copying. Copies between zero and three bytes.
+    __ And(scratch4, dest, Operand(kReadAlignmentMask));
+    Label dest_aligned;
+    __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
+    Label aligned_loop;
+    __ bind(&aligned_loop);
+    __ lbu(scratch1, MemOperand(src));
+    __ addiu(src, src, 1);
+    __ sb(scratch1, MemOperand(dest));
+    __ addiu(dest, dest, 1);
+    __ addiu(scratch4, scratch4, 1);
+    __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
+    __ bind(&dest_aligned);
+  }
+
+  Label simple_loop;
+
+  __ And(scratch4, src, Operand(kReadAlignmentMask));
+  __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
+
+  // Loop for src/dst that are not aligned the same way.
+  // This loop uses lwl and lwr instructions. These instructions
+  // depend on the endianness, and the implementation assumes little-endian.
+  {
+    Label loop;
+    __ bind(&loop);
+    __ lwr(scratch1, MemOperand(src));
+    __ Addu(src, src, Operand(kReadAlignment));
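+    // src has already been advanced, so lwl at offset -1 addresses byte 3 of
+    // the original word, completing the canonical little-endian lwr/lwl pair.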
+    __ lwl(scratch1, MemOperand(src, -1));
+    __ sw(scratch1, MemOperand(dest));
+    __ Addu(dest, dest, Operand(kReadAlignment));
+    __ Subu(scratch2, limit, dest);
+    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+  }
+
+  __ Branch(&byte_loop);
+
+  // Simple loop.
+  // Copy words from src to dest, until less than four bytes left.
+  // Both src and dest are word aligned.
+  __ bind(&simple_loop);
+  {
+    Label loop;
+    __ bind(&loop);
+    __ lw(scratch1, MemOperand(src));
+    __ Addu(src, src, Operand(kReadAlignment));
+    __ sw(scratch1, MemOperand(dest));
+    __ Addu(dest, dest, Operand(kReadAlignment));
+    __ Subu(scratch2, limit, dest);
+    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+  }
+
+  // Copy bytes from src to dest until dest hits limit.
+  __ bind(&byte_loop);
+  // Test if dest has already reached the limit.
+  __ Branch(&done, ge, dest, Operand(limit));
+  __ lbu(scratch1, MemOperand(src));
+  __ addiu(src, src, 1);
+  __ sb(scratch1, MemOperand(dest));
+  __ addiu(dest, dest, 1);
+  __ Branch(&byte_loop);
+
+  __ bind(&done);
 }
 
 
@@ -659,88 +5213,1439 @@
                                                         Register scratch4,
                                                         Register scratch5,
                                                         Label* not_found) {
-  UNIMPLEMENTED_MIPS();
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits, as such strings have a
+  // different hash algorithm. Don't try to look for these in the symbol table.
+  Label not_array_index;
+  __ Subu(scratch, c1, Operand(static_cast<int>('0')));
+  __ Branch(&not_array_index,
+            Ugreater,
+            scratch,
+            Operand(static_cast<int>('9' - '0')));
+  __ Subu(scratch, c2, Operand(static_cast<int>('0')));
+
+  // If the check failed, combine both characters into a single halfword.
+  // This is required by the contract of the method: code at the
+  // not_found branch expects this combination in the c1 register.
+  Label tmp;
+  __ sll(scratch1, c2, kBitsPerByte);
+  __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
+  __ Or(c1, c1, scratch1);
+  __ bind(&tmp);
+  __ Branch(not_found,
+            Uless_equal,
+            scratch,
+            Operand(static_cast<int>('9' - '0')));
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  StringHelper::GenerateHashInit(masm, hash, c1);
+  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+  StringHelper::GenerateHashGetHash(masm, hash);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ sll(scratch, c2, kBitsPerByte);
+  __ Or(chars, chars, scratch);
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string.
+
+  // Load symbol table.
+  // Load address of first element of the symbol table.
+  Register symbol_table = c2;
+  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+  Register undefined = scratch4;
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+  __ sra(mask, mask, 1);
+  __ Addu(mask, mask, -1);
+
+  // Calculate untagged address of the first element of the symbol table.
+  Register first_symbol_table_element = symbol_table;
+  __ Addu(first_symbol_table_element, symbol_table,
+          Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+  // Registers.
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string
+  // mask:  capacity mask
+  // first_symbol_table_element: address of the first element of
+  //                             the symbol table
+  // undefined: the undefined object
+  // scratch: -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
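+  // Using SymbolTable::GetProbeOffset keeps the probe sequence identical to
+  // the one used by the runtime symbol table lookup.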
+  Label found_in_symbol_table;
+  Label next_probe[kProbes];
+  Register candidate = scratch5;  // Scratch register contains candidate.
+  for (int i = 0; i < kProbes; i++) {
+    // Calculate entry in symbol table.
+    if (i > 0) {
+      __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+    } else {
+      __ mov(candidate, hash);
+    }
+
+    __ And(candidate, candidate, Operand(mask));
+
+    // Load the entry from the symbol table.
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+    __ sll(scratch, candidate, kPointerSizeLog2);
+    __ Addu(scratch, scratch, first_symbol_table_element);
+    __ lw(candidate, MemOperand(scratch));
+
+    // If entry is undefined no string with this hash can be found.
+    Label is_string;
+    __ GetObjectType(candidate, scratch, scratch);
+    __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
+
+    __ Branch(not_found, eq, undefined, Operand(candidate));
+    // Must be null (deleted entry).
+    if (FLAG_debug_code) {
+      __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+      __ Assert(eq, "oddball in symbol table is not undefined or null",
+          scratch, Operand(candidate));
+    }
+    __ jmp(&next_probe[i]);
+
+    __ bind(&is_string);
+
+    // Check that the candidate is a non-external ASCII string.  The instance
+    // type is still in the scratch register from the CompareObjectType
+    // operation.
+    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
+
+    // If length is not 2 the string is not a candidate.
+    __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+    __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
+
+    // Check if the two characters match.
+    // Assumes that word load is little endian.
+    __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+    __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
+    __ bind(&next_probe[i]);
+  }
+
+  // No matching 2 character string found by probing.
+  __ jmp(not_found);
+
+  // Scratch register contains result when we fall through to here.
+  Register result = candidate;
+  __ bind(&found_in_symbol_table);
+  __ mov(v0, result);
 }
 
 
 void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                       Register hash,
                                       Register character) {
-  UNIMPLEMENTED_MIPS();
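+  // These helpers must compute exactly the same hash as the runtime string
+  // hashing code, or the symbol table probes above will miss.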
+  // hash = character + (character << 10);
+  __ sll(hash, character, 10);
+  __ addu(hash, hash, character);
+  // hash ^= hash >> 6;
+  __ sra(at, hash, 6);
+  __ xor_(hash, hash, at);
 }
 
 
 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                               Register hash,
                                               Register character) {
-  UNIMPLEMENTED_MIPS();
+  // hash += character;
+  __ addu(hash, hash, character);
+  // hash += hash << 10;
+  __ sll(at, hash, 10);
+  __ addu(hash, hash, at);
+  // hash ^= hash >> 6;
+  __ sra(at, hash, 6);
+  __ xor_(hash, hash, at);
 }
 
 
 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                          Register hash) {
-  UNIMPLEMENTED_MIPS();
+  // hash += hash << 3;
+  __ sll(at, hash, 3);
+  __ addu(hash, hash, at);
+  // hash ^= hash >> 11;
+  __ sra(at, hash, 11);
+  __ xor_(hash, hash, at);
+  // hash += hash << 15;
+  __ sll(at, hash, 15);
+  __ addu(hash, hash, at);
+
+  // if (hash == 0) hash = 27;
+  __ ori(at, zero_reg, 27);
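+  // movz writes 'at' into 'hash' only when 'hash' is zero.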
+  __ movz(hash, at, hash);
 }
 
 
 void SubStringStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label sub_string_runtime;
+  // Stack frame on entry.
+  //  ra: return address
+  //  sp[0]: to
+  //  sp[4]: from
+  //  sp[8]: string
+
+  // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. It is tested that:
+  //  "string" is a sequential string,
+  //  both "from" and "to" are smis, and
+  //  0 <= from <= to <= string.length.
+  // If any of these assumptions fail, we call the runtime system.
+
+  static const int kToOffset = 0 * kPointerSize;
+  static const int kFromOffset = 1 * kPointerSize;
+  static const int kStringOffset = 2 * kPointerSize;
+
+  Register to = t2;
+  Register from = t3;
+
+  // Check bounds and smi-ness.
+  __ lw(to, MemOperand(sp, kToOffset));
+  __ lw(from, MemOperand(sp, kFromOffset));
+  STATIC_ASSERT(kFromOffset == kToOffset + 4);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
+  __ JumpIfNotSmi(from, &sub_string_runtime);
+  __ JumpIfNotSmi(to, &sub_string_runtime);
+
+  __ sra(a3, from, kSmiTagSize);  // Remove smi tag.
+  __ sra(t5, to, kSmiTagSize);  // Remove smi tag.
+
+  // a3: from index (untagged smi)
+  // t5: to index (untagged smi)
+
+  __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg));  // From < 0.
+
+  __ subu(a2, t5, a3);
+  __ Branch(&sub_string_runtime, gt, a3, Operand(t5));  // Fail if from > to.
+
+  // Special handling of sub-strings of length 1 and 2. One character strings
+  // are handled in the runtime system (looked up in the single character
+  // cache). Two character strings are looked for in the symbol cache.
+  __ Branch(&sub_string_runtime, lt, a2, Operand(2));
+
+  // Both to and from are smis.
+
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  // Make sure first argument is a sequential (or flat) string.
+  __ lw(t1, MemOperand(sp, kStringOffset));
+  __ Branch(&sub_string_runtime, eq, t1, Operand(kSmiTagMask));
+
+  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  __ And(t4, a1, Operand(kIsNotStringMask));
+
+  __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
+
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t1: string
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  Label seq_string;
+  __ And(t0, a1, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+
+  // External strings go to runtime.
+  __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
+
+  // Sequential strings are handled directly.
+  __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
+
+  // Cons string. Try to recurse (once) on the first substring.
+  // (This adds a little more generality than necessary to handle flattened
+  // cons strings, but not much).
+  __ lw(t1, FieldMemOperand(t1, ConsString::kFirstOffset));
+  __ lw(t0, FieldMemOperand(t1, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  // Cons and External strings go to runtime.
+  __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
+
+  // Definitely a sequential string.
+  __ bind(&seq_string);
+
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t1: string
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  __ lw(t0, FieldMemOperand(t1, String::kLengthOffset));
+  __ Branch(&sub_string_runtime, lt, t0, Operand(to));  // Fail if to > length.
+  to = no_reg;
+
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t1: string
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  // Check for flat ASCII string.
+  Label non_ascii_flat;
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+
+  __ And(t4, a1, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
+
+  Label result_longer_than_two;
+  __ Branch(&result_longer_than_two, gt, a2, Operand(2));
+
+  // Sub string of length 2 requested.
+  // Get the two characters forming the sub string.
+  __ Addu(t1, t1, Operand(a3));
+  __ lbu(a3, FieldMemOperand(t1, SeqAsciiString::kHeaderSize));
+  __ lbu(t0, FieldMemOperand(t1, SeqAsciiString::kHeaderSize + 1));
+
+  // Try to lookup two character string in symbol table.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // a2: result string length.
+  // a3: two characters combined into halfword in little endian byte order.
+  __ bind(&make_two_character_string);
+  __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
+  __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&result_longer_than_two);
+
+  // Allocate the result.
+  __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
+
+  // v0: result string.
+  // a2: result string length.
+  // a3: from index (untagged smi)
+  // t1: string.
+  // t3: (a.k.a. from): from offset (smi)
+  // Locate first character of result.
+  __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate 'from' character of string.
+  __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(t1, t1, Operand(a3));
+
+  // v0: result string.
+  // a1: first character of result string.
+  // a2: result string length.
+  // t1: first character of sub string to copy.
+  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharactersLong(
+      masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii_flat);
+  // a2: result string length.
+  // t1: string.
+  // t3: (a.k.a. from): from offset (smi)
+  // Check for flat two byte string.
+
+  // Allocate the result.
+  __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
+
+  // v0: result string.
+  // a2: result string length.
+  // t1: string.
+  // Locate first character of result.
+  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate 'from' character of string.
+  __ Addu(t1, t1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // As "from" is a smi it is 2 times the value which matches the size of a two
+  // byte character.
+  __ Addu(t1, t1, Operand(from));
+  from = no_reg;
+
+  // v0: result string.
+  // a1: first character of result.
+  // a2: result length.
+  // t1: first character of string to copy.
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharactersLong(
+      masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to create the sub string.
+  __ bind(&sub_string_runtime);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2,
+                                                      Register scratch3) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
+  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
+  __ bind(&strings_not_equal);
+  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
+  __ Ret();
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+
+  // Compare characters.
+  __ bind(&compare_chars);
+
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, length, scratch2, scratch3, v0,
+                                &strings_not_equal);
+
+  // Characters are equal.
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
 }
 
 
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                                        Register right,
                                                         Register left,
+                                                        Register right,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
                                                         Register scratch4) {
-  UNIMPLEMENTED_MIPS();
+  Label result_not_equal, compare_lengths;
+  // Find minimum length and length difference.
+  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
+  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ Subu(scratch3, scratch1, Operand(scratch2));
+  Register length_delta = scratch3;
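+  // Compute the minimum length: movn replaces scratch1 with scratch2 when
+  // the slt result is non-zero, i.e. when scratch2 < scratch1.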
+  __ slt(scratch4, scratch2, scratch1);
+  __ movn(scratch1, scratch2, scratch4);
+  Register min_length = scratch1;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
+
+  // Compare loop.
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, min_length, scratch2, scratch4, v0,
+                                &result_not_equal);
+
+  // Compare lengths - strings up to min-length are equal.
+  __ bind(&compare_lengths);
+  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use length_delta as result if it's zero.
+  __ mov(scratch2, length_delta);
+  __ mov(scratch4, zero_reg);
+  __ mov(v0, zero_reg);
+
+  __ bind(&result_not_equal);
+  // Conditionally update the result based either on length_delta or
+  // on the last comparison performed in the loop above.
+  Label ret;
+  __ Branch(&ret, eq, scratch2, Operand(scratch4));
+  __ li(v0, Operand(Smi::FromInt(GREATER)));
+  __ Branch(&ret, gt, scratch2, Operand(scratch4));
+  __ li(v0, Operand(Smi::FromInt(LESS)));
+  __ bind(&ret);
+  __ Ret();
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Label* chars_not_equal) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ Addu(scratch1, length,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(left, left, Operand(scratch1));
+  __ Addu(right, right, Operand(scratch1));
+  __ Subu(length, zero_reg, length);
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ Addu(scratch3, left, index);
+  __ lbu(scratch1, MemOperand(scratch3));
+  __ Addu(scratch3, right, index);
+  __ lbu(scratch2, MemOperand(scratch3));
+  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
+  __ Addu(index, index, 1);
+  __ Branch(&loop, ne, index, Operand(zero_reg));
 }
 
 
 void StringCompareStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label runtime;
+
+  Counters* counters = masm->isolate()->counters();
+
+  // Stack frame on entry.
+  //  sp[0]: right string
+  //  sp[4]: left string
+  __ lw(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
+  __ lw(a0, MemOperand(sp, 0 * kPointerSize));  // Right.
+
+  Label not_same;
+  __ Branch(&not_same, ne, a0, Operand(a1));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential ASCII strings.
+  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+
+  // Compare flat ASCII strings natively. Remove arguments from stack first.
+  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
+
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label string_add_runtime, call_builtin;
+  Builtins::JavaScript builtin_id = Builtins::ADD;
+
+  Counters* counters = masm->isolate()->counters();
+
+  // Stack on entry:
+  // sp[0]: second argument (right).
+  // sp[4]: first argument (left).
+
+  // Load the two arguments.
+  __ lw(a0, MemOperand(sp, 1 * kPointerSize));  // First argument.
+  __ lw(a1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
+
+  // Make sure that both arguments are strings if not known in advance.
+  if (flags_ == NO_STRING_ADD_FLAGS) {
+    __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
+    // Load instance types.
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+    STATIC_ASSERT(kStringTag == 0);
+    // If either is not a string, go to runtime.
+    __ Or(t4, t0, Operand(t1));
+    __ And(t4, t4, Operand(kIsNotStringMask));
+    __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+  } else {
+    // Here at least one of the arguments is definitely a string.
+    // We convert the one that is not known to be a string.
+    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+      GenerateConvertArgument(
+          masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
+      builtin_id = Builtins::STRING_ADD_RIGHT;
+    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+      GenerateConvertArgument(
+          masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
+      builtin_id = Builtins::STRING_ADD_LEFT;
+    }
+  }
+
+  // Both arguments are strings.
+  // a0: first string
+  // a1: second string
+  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  {
+    Label strings_not_empty;
+    // Check if either of the strings are empty. In that case return the other.
+    // These tests use a zero-length check on the string length (a Smi).
+    // Assert that Smi::FromInt(0) is really 0.
+    STATIC_ASSERT(kSmiTag == 0);
+    ASSERT(Smi::FromInt(0) == 0);
+    __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
+    __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
+    __ mov(v0, a0);       // Assume we'll return first string (from a0).
+    __ movz(v0, a1, a2);  // If first is empty, return second (from a1).
+    __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
+    __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
+    __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
+    __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
+
+    __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+    __ Addu(sp, sp, Operand(2 * kPointerSize));
+    __ Ret();
+
+    __ bind(&strings_not_empty);
+  }
+
+  // Untag both string-lengths.
+  __ sra(a2, a2, kSmiTagSize);
+  __ sra(a3, a3, kSmiTagSize);
+
+  // Both strings are non-empty.
+  // a0: first string
+  // a1: second string
+  // a2: length of first string
+  // a3: length of second string
+  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // Look at the length of the result of adding the two strings.
+  Label string_add_flat_result, longer_than_two;
+  // Adding two lengths can't overflow.
+  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
+  __ Addu(t2, a2, Operand(a3));
+  // Use the symbol table when adding two one character strings, as it
+  // helps later optimizations to return a symbol here.
+  __ Branch(&longer_than_two, ne, t2, Operand(2));
+
+  // Check that both strings are non-external ASCII strings.
+  if (flags_ != NO_STRING_ADD_FLAGS) {
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  }
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
+                                                 &string_add_runtime);
+
+  // Get the two characters forming the sub string.
+  __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
+  __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+
+  // Try to lookup two character string in symbol table. If it is not found
+  // just allocate a new one.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&make_two_character_string);
+  // Resulting string has length 2 and first chars of two strings
+  // are combined into single halfword in a2 register.
+  // So we can fill resulting string without two loops by a single
+  // halfword store instruction (which assumes that processor is
+  // in a little endian mode).
+  __ li(t2, Operand(2));
+  __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
+  __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&longer_than_two);
+  // Check if resulting string will be flat.
+  __ Branch(&string_add_flat_result, lt, t2,
+           Operand(String::kMinNonFlatLength));
+  // Handle exceptionally long strings in the runtime system.
+  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+  ASSERT(IsPowerOf2(String::kMaxLength + 1));
+  // kMaxLength + 1 is representable as a shifted literal; kMaxLength is not.
+  __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
+
+  // If result is not supposed to be flat, allocate a cons string object.
+  // If both strings are ASCII the result is an ASCII cons string.
+  if (flags_ != NO_STRING_ADD_FLAGS) {
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  }
+  Label non_ascii, allocated, ascii_data;
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  // Branch to non_ascii if either string-encoding field is zero (non-ascii).
+  __ And(t4, t0, Operand(t1));
+  __ And(t4, t4, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
+
+  // Allocate an ASCII cons string.
+  __ bind(&ascii_data);
+  __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
+  __ bind(&allocated);
+  // Fill the fields of the cons string.
+  __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+  __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+  __ mov(v0, t3);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii);
+  // At least one of the strings is two-byte. Check whether it happens
+  // to contain only ASCII characters.
+  // t0: first instance type.
+  // t1: second instance type.
+  // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
+  __ And(at, t0, Operand(kAsciiDataHintMask));
+  __ and_(at, at, t1);
+  __ Branch(&ascii_data, ne, at, Operand(zero_reg));
+
+  __ xor_(t0, t0, t1);
+  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+  __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+
+  // Allocate a two byte cons string.
+  __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
+  __ Branch(&allocated);
+
+  // Handle creating a flat result. First check that both strings are
+  // sequential and that they have the same encoding.
+  // a0: first string
+  // a1: second string
+  // a2: length of first string
+  // a3: length of second string
+  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t2: sum of lengths.
+  __ bind(&string_add_flat_result);
+  if (flags_ != NO_STRING_ADD_FLAGS) {
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  }
+  // Check that both strings are sequential, meaning that we
+  // branch to runtime if either string tag is non-zero.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ Or(t4, t0, Operand(t1));
+  __ And(t4, t4, Operand(kStringRepresentationMask));
+  __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+
+  // Now check if both strings have the same encoding (ASCII/Two-byte).
+  // a0: first string
+  // a1: second string
+  // a2: length of first string
+  // a3: length of second string
+  // t0: first string instance type
+  // t1: second string instance type
+  // t2: sum of lengths.
+  Label non_ascii_string_add_flat_result;
+  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
+  __ xor_(t3, t1, t0);
+  __ And(t3, t3, Operand(kStringEncodingMask));
+  __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
+  // And see if it's ASCII (0) or two-byte (1).
+  __ And(t3, t0, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
+
+  // Both strings are sequential ASCII strings. We also know that they are
+  // short (since the sum of the lengths is less than kMinNonFlatLength).
+  // t2: length of resulting flat string
+  __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
+  // Locate first character of result.
+  __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // a0: first character of first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: first character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
+
+  // Load second argument and locate first character.
+  __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // a1: first character of second string.
+  // a3: length of second string.
+  // t2: next character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
+  __ mov(v0, t3);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii_string_add_flat_result);
+  // Both strings are sequential two byte strings.
+  // a0: first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: sum of length of strings.
+  __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
+  // a0: first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t3: result string.
+
+  // Locate first character of result.
+  __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // a0: first character of first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: first character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
+
+  // Locate first character of second argument.
+  __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // a1: first character of second string.
+  // a3: length of second string.
+  // t2: next character of result (after copy of first string).
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
+
+  __ mov(v0, t3);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to add the two strings.
+  __ bind(&string_add_runtime);
+  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+  if (call_builtin.is_linked()) {
+    __ bind(&call_builtin);
+    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+  }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+                                            int stack_offset,
+                                            Register arg,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            Register scratch4,
+                                            Label* slow) {
+  // First check if the argument is already a string.
+  Label not_string, done;
+  __ JumpIfSmi(arg, &not_string);
+  __ GetObjectType(arg, scratch1, scratch1);
+  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
+
+  // Check the number to string cache.
+  Label not_cached;
+  __ bind(&not_string);
+  // Puts the cached result into scratch1.
+  NumberToStringStub::GenerateLookupNumberStringCache(masm,
+                                                      arg,
+                                                      scratch1,
+                                                      scratch2,
+                                                      scratch3,
+                                                      scratch4,
+                                                      false,
+                                                      &not_cached);
+  __ mov(arg, scratch1);
+  __ sw(arg, MemOperand(sp, stack_offset));
+  __ jmp(&done);
+
+  // Check if the argument is a safe string wrapper.
+  __ bind(&not_cached);
+  __ JumpIfSmi(arg, slow);
+  __ GetObjectType(arg, scratch1, scratch2);  // map -> scratch1.
+  __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
+  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+  __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ And(scratch2, scratch2, scratch4);
+  __ Branch(slow, ne, scratch2, Operand(scratch4));
+  __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
+  __ sw(arg, MemOperand(sp, stack_offset));
+
+  __ bind(&done);
 }
 
 
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(state_ == CompareIC::SMIS);
+  Label miss;
+  __ Or(a2, a1, a0);
+  __ JumpIfNotSmi(a2, &miss);
+
+  if (GetCondition() == eq) {
+    // For equality we do not care about the sign of the result.
+    __ Subu(v0, a0, a1);
+  } else {
+    // Untag before subtracting to avoid handling overflow.
+    __ SmiUntag(a1);
+    __ SmiUntag(a0);
+    __ Subu(v0, a1, a0);
+  }
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+  Label generic_stub;
+  Label unordered;
+  Label miss;
+  __ And(a2, a1, Operand(a0));
+  __ JumpIfSmi(a2, &generic_stub);
+
+  __ GetObjectType(a0, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+
+  // Inline the double comparison, falling back to the general compare
+  // stub if NaN is involved or the FPU is unsupported.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    // Load left and right operand.
+    __ Subu(a2, a1, Operand(kHeapObjectTag));
+    __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+    __ Subu(a2, a0, Operand(kHeapObjectTag));
+    __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+
+    Label fpu_eq, fpu_lt, fpu_gt;
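+    // Each c() compare sets the FPU condition flag; bc1t/bc1f branch on it.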
+    // Compare operands (test if unordered).
+    __ c(UN, D, f0, f2);
+    // Don't base result on status bits when a NaN is involved.
+    __ bc1t(&unordered);
+    __ nop();
+
+    // Test if equal.
+    __ c(EQ, D, f0, f2);
+    __ bc1t(&fpu_eq);
+    __ nop();
+
+    // Test if unordered or less (unordered case is already handled).
+    __ c(ULT, D, f0, f2);
+    __ bc1t(&fpu_lt);
+    __ nop();
+
+    // Otherwise it's greater.
+    __ bc1f(&fpu_gt);
+    __ nop();
+
+    // Return a result of -1, 0, or 1.
+    __ bind(&fpu_eq);
+    __ li(v0, Operand(EQUAL));
+    __ Ret();
+
+    __ bind(&fpu_lt);
+    __ li(v0, Operand(LESS));
+    __ Ret();
+
+    __ bind(&fpu_gt);
+    __ li(v0, Operand(GREATER));
+    __ Ret();
+
+    __ bind(&unordered);
+  }
+
+  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+  __ bind(&generic_stub);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SYMBOLS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+  Register tmp1 = a2;
+  Register tmp2 = a3;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are symbols.
+  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(tmp1, tmp1, Operand(tmp2));
+  __ And(tmp1, tmp1, Operand(kIsSymbolMask));
+  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
+  // Make sure a0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(a0));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(v0, right);
+  // Symbols are compared by identity.
+  __ Ret(ne, left, Operand(right));
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRINGS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+  Register tmp1 = a2;
+  Register tmp2 = a3;
+  Register tmp3 = t0;
+  Register tmp4 = t1;
+  Register tmp5 = t2;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ Or(tmp3, tmp1, tmp2);
+  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
+  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
+
+  // Fast check for identical strings.
+  Label left_ne_right;
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
+  __ mov(v0, zero_reg);  // In the delay slot.
+  __ Ret();
+  __ bind(&left_ne_right);
+
+  // Handle not identical strings.
+
+  // Check that both strings are symbols. If they are, we're done
+  // because we already know they are not identical.
+  ASSERT(GetCondition() == eq);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(tmp3, tmp1, Operand(tmp2));
+  __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+  Label is_symbol;
+  __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
+  __ mov(v0, a0);  // In the delay slot.
+  // Make sure a0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(a0));
+  __ Ret();
+  __ bind(&is_symbol);
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+                                                  &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2, tmp3);
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ Push(left, right);
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(state_ == CompareIC::OBJECTS);
+  Label miss;
+  __ And(a2, a1, Operand(a0));
+  __ JumpIfSmi(a2, &miss);
+
+  __ GetObjectType(a0, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+
+  ASSERT(GetCondition() == eq);
+  __ Subu(v0, a0, Operand(a1));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Push(a1, a0);
+  __ push(ra);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+                                             masm->isolate());
+  __ EnterInternalFrame();
+  __ Push(a1, a0);
+  __ li(t0, Operand(Smi::FromInt(op_)));
+  __ push(t0);
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
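+  // v0 now holds the code object of the rewritten stub.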
+  // Compute the entry point of the rewritten stub.
+  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore registers.
+  __ pop(ra);
+  __ pop(a0);
+  __ pop(a1);
+  __ Jump(a2);
+}
+
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+  // No need to pop or drop anything, LeaveExitFrame will restore the old
+  // stack, thus dropping the allocated space for the return value.
+  // The saved ra is after the reserved stack space for the 4 args.
+  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
+
+  if (FLAG_debug_code && EnableSlowAsserts()) {
+    // In case of an error the return address may point to a memory area
+    // filled with kZapValue by the GC.
+    // Dereference the address and check for this.
+    __ lw(t0, MemOperand(t9));
+    __ Assert(ne, "Received invalid return address.", t0,
+        Operand(reinterpret_cast<uint32_t>(kZapValue)));
+  }
+  __ Jump(t9);
 }
 
 
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
-                                Register receiver,
-                                Register key,
-                                Register elements_map,
-                                Register elements,
-                                Register scratch1,
-                                Register scratch2,
-                                Register result,
-                                Label* not_pixel_array,
-                                Label* key_not_smi,
-                                Label* out_of_range) {
-  UNIMPLEMENTED_MIPS();
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+                                    ExternalReference function) {
+  __ li(t9, Operand(function));
+  this->GenerateCall(masm, t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+                                    Register target) {
+  __ Move(t9, target);
+  __ AssertStackIsAligned();
+  // Allocate space for arg slots.
+  __ Subu(sp, sp, kCArgsSlotsSize);
+
+  // Block the trampoline pool through the whole function to make sure the
+  // number of generated instructions is constant.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+
+  // We need to get the current 'pc' value, which is not available on MIPS.
+  Label find_ra;
+  masm->bal(&find_ra);  // ra = pc + 8.
+  masm->nop();  // Branch delay slot nop.
+  masm->bind(&find_ra);
+
+  const int kNumInstructionsToJump = 6;
+  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
+  // Push return address (accessible to GC through exit frame pc).
+  // This spot for ra was reserved in EnterExitFrame.
+  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
+  masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+                       RelocInfo::CODE_TARGET), true);
+  // Call the function.
+  masm->Jump(t9);
+  // Make sure the stored 'ra' points to this position.
+  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+}
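
MIPS has no pc-relative read, so the code above materializes the return
address with a bal to the very next instruction (ra = pc + 8) and then biases
ra past the six instructions that follow find_ra. A sketch of that accounting;
the expansion shown is the expected one, not captured output, and kPointerSize
equals kInstrSize (4) on MIPS32, which is why kPointerSize works in the addiu:

    // Expected expansion after find_ra, 6 instructions of 4 bytes each:
    //   addiu ra, ra, 24        // 1: ra = find_ra + 6 * 4
    //   sw    ra, [sp + slots]  // 2: publish ra to the exit frame
    //   lui   ra, hi(stub)      // 3: li(..., true) always emits the
    //   ori   ra, ra, lo(stub)  // 4:   full lui/ori pair
    //   jr    t9                // 5: call the function
    //   nop                     // 6: branch delay slot
    const int kInstrSizeSketch = 4;
    int StoredRaOffsetSketch() { return 6 * kInstrSizeSketch; }  // 24 bytes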
+
+
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register receiver,
+    Register properties,
+    String* name,
+    Register scratch0) {
+  // If the names of the slots in the range from 1 to kProbes - 1 for the
+  // hash value are not equal to the name and the kProbes-th slot is not
+  // used (its name is the undefined value), the hash table is guaranteed
+  // not to contain the property. This holds even if some slots represent
+  // deleted properties (their names are the null value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // scratch0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = scratch0;
+    // Capacity is smi 2^n.
+    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
+    __ Subu(index, index, Operand(1));
+    __ And(index, index, Operand(
+         Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // index *= 3.
+    __ mov(at, index);
+    __ sll(index, index, 1);
+    __ Addu(index, index, at);
+
+    Register entity_name = scratch0;
+    // Having undefined in this slot means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    Register tmp = properties;
+
+    __ sll(scratch0, index, 1);
+    __ Addu(tmp, properties, scratch0);
+    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+    ASSERT(!tmp.is(entity_name));
+    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+    __ Branch(done, eq, entity_name, Operand(tmp));
+
+    if (i != kInlinedProbes - 1) {
+      // Stop if found the property.
+      __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
+
+      // Check if the entry name is not a symbol.
+      __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+      __ lbu(entity_name,
+             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+      __ And(scratch0, entity_name, Operand(kIsSymbolMask));
+      __ Branch(miss, eq, scratch0, Operand(zero_reg));
+
+      // Restore the properties.
+      __ lw(properties,
+            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+    }
+  }
+
+  const int spill_mask =
+      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
+       a2.bit() | a1.bit() | a0.bit());
+
+  __ MultiPush(spill_mask);
+  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ li(a1, Operand(Handle<String>(name)));
+  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ MultiPop(spill_mask);
+
+  __ Branch(done, eq, v0, Operand(zero_reg));
+  __ Branch(miss, ne, v0, Operand(zero_reg));
+  return result;
+}
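
A plain C++ sketch of the per-probe arithmetic the unrolled loop above
performs, assuming GetProbeOffset(i) == i + i * i as the comments state and
kEntrySize == 3 as asserted; the stub computes index * 3 as
(index << 1) + index because a shift plus an add is cheaper than a multiply:

    #include <cstdint>

    uint32_t ProbeEntryIndexSketch(uint32_t hash, uint32_t capacity, int i) {
      uint32_t mask = capacity - 1;                // capacity is a power of 2
      uint32_t index = (hash + i + i * i) & mask;  // masked probe index
      return (index << 1) + index;                 // index * kEntrySize
    }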
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register elements,
+                                                        Register name,
+                                                        Register scratch1,
+                                                        Register scratch2) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
+  // Compute the capacity mask.
+  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
+  __ sra(scratch1, scratch1, kSmiTagSize);  // Convert smi to int.
+  __ Subu(scratch1, scratch1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted in the following And instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ Addu(scratch2, scratch2, Operand(
+           StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    }
+    __ srl(scratch2, scratch2, String::kHashShift);
+    __ And(scratch2, scratch1, scratch2);
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // scratch2 = scratch2 * 3.
+    __ mov(at, scratch2);
+    __ sll(scratch2, scratch2, 1);
+    __ Addu(scratch2, scratch2, at);
+
+    // Check if the key is identical to the name.
+    __ sll(at, scratch2, 2);
+    __ Addu(scratch2, elements, at);
+    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
+    __ Branch(done, eq, name, Operand(at));
+  }
+
+  const int spill_mask =
+      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
+       a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
+      ~(scratch1.bit() | scratch2.bit());
+
+  __ MultiPush(spill_mask);
+  __ Move(a0, elements);
+  __ Move(a1, name);
+  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ mov(scratch2, a2);
+  __ MultiPop(spill_mask);
+
+  __ Branch(done, ne, v0, Operand(zero_reg));
+  __ Branch(miss, eq, v0, Operand(zero_reg));
+}
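
The pre-shifted probe offset above relies on the identity sketched below:
adding offset << shift to the untouched hash field and shifting once is the
same as shifting first and then adding, as long as the shifted offset does not
overflow 32 bits (which is exactly what the ASSERT guards):

    #include <cstdint>

    // Equivalent to (((hash_field >> shift) + offset) & mask) under the
    // no-overflow assumption; hash_field keeps the hash in its upper bits.
    uint32_t MaskedIndexSketch(uint32_t hash_field, uint32_t offset,
                               int shift, uint32_t mask) {
      return ((hash_field + (offset << shift)) >> shift) & mask;
    }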
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Registers:
+  //  dictionary (a0): StringDictionary to probe.
+  //  key (a1): the key to look up.
+  //  index (a2): will hold the index of the entry if the lookup succeeds.
+  // Returns:
+  //  result (v0): zero if the lookup failed, non-zero otherwise.
+
+  Register result = v0;
+  Register dictionary = a0;
+  Register key = a1;
+  Register index = a2;
+  Register mask = a3;
+  Register hash = t0;
+  Register undefined = t1;
+  Register entry_key = t2;
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
+  __ sra(mask, mask, kSmiTagSize);
+  __ Subu(mask, mask, Operand(1));
+
+  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    // Capacity is smi 2^n.
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted in the following And instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ Addu(index, hash, Operand(
+           StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    } else {
+      __ mov(index, hash);
+    }
+    __ srl(index, index, String::kHashShift);
+    __ And(index, mask, index);
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // index *= 3.
+    __ mov(at, index);
+    __ sll(index, index, 1);
+    __ Addu(index, index, at);
+
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ sll(index, index, 2);
+    __ Addu(index, index, dictionary);
+    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // Having undefined in this slot means the name is not contained.
+    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
+
+    // Stop if found the property.
+    __ Branch(&in_dictionary, eq, entry_key, Operand(key));
+
+    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // Check if the entry name is not a symbol.
+      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+      __ lbu(entry_key,
+             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+      __ And(result, entry_key, Operand(kIsSymbolMask));
+      __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing negative lookup then probing failure should be
+  // treated as a lookup success. For positive lookup probing failure
+  // should be treated as lookup failure.
+  if (mode_ == POSITIVE_LOOKUP) {
+    __ mov(result, zero_reg);
+    __ Ret();
+  }
+
+  __ bind(&in_dictionary);
+  __ li(result, 1);
+  __ Ret();
+
+  __ bind(&not_in_dictionary);
+  __ mov(result, zero_reg);
+  __ Ret();
 }
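
The three exits above reduce to a small truth table. A C++ model of the value
returned in v0 (a sketch of the control flow, not of the generated code):

    enum ProbeOutcomeSketch { kAbsent, kPresent, kUndecided };

    // Non-zero means "treat as found". Exhausting every probe (kUndecided)
    // is resolved conservatively: a negative lookup cannot prove absence,
    // and a positive lookup cannot prove presence.
    int StubResultSketch(ProbeOutcomeSketch outcome, bool negative_lookup) {
      if (outcome == kPresent) return 1;
      if (outcome == kAbsent) return 0;
      return negative_lookup ? 1 : 0;
    }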
 
 
@@ -749,4 +6654,3 @@
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
-
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 675730a..356aa97 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,13 +39,22 @@
 // TranscendentalCache runtime function.
 class TranscendentalCacheStub: public CodeStub {
  public:
-  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
-      : type_(type) {}
+  enum ArgumentType {
+    TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
+    UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+  };
+
+  TranscendentalCacheStub(TranscendentalCache::Type type,
+                          ArgumentType argument_type)
+      : type_(type), argument_type_(argument_type) { }
   void Generate(MacroAssembler* masm);
  private:
   TranscendentalCache::Type type_;
+  ArgumentType argument_type_;
+  void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
+
   Major MajorKey() { return TranscendentalCache; }
-  int MinorKey() { return type_; }
+  int MinorKey() { return type_ | argument_type_; }
   Runtime::FunctionId RuntimeFunction();
 };
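
The minor key now carries the argument type in the bit just above the type
bits, so the tagged and untagged variants of the same transcendental function
get distinct stubs. A sketch of the packing, with an assumed width standing in
for TranscendentalCache::kTranscendentalTypeBits:

    const int kTypeBitsSketch = 3;  // assumed width, not V8's constant

    enum ArgumentTypeSketch {
      kTaggedSketch = 0 << kTypeBitsSketch,
      kUntaggedSketch = 1 << kTypeBitsSketch
    };

    // MinorKeySketch(t, kTaggedSketch) != MinorKeySketch(t, kUntaggedSketch),
    // so each argument style is cached as its own stub.
    int MinorKeySketch(int type, ArgumentTypeSketch argument_type) {
      return type | argument_type;
    }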
 
@@ -63,176 +72,108 @@
 };
 
 
-class GenericBinaryOpStub : public CodeStub {
+class UnaryOpStub: public CodeStub {
  public:
-  static const int kUnknownIntValue = -1;
-
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      Register lhs,
-                      Register rhs,
-                      int constant_rhs = kUnknownIntValue)
+  UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
       : op_(op),
         mode_(mode),
-        lhs_(lhs),
-        rhs_(rhs),
-        constant_rhs_(constant_rhs),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
-        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
-        name_(NULL) { }
+        operand_type_(UnaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+  }
 
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+  UnaryOpStub(
+      int key,
+      UnaryOpIC::TypeInfo operand_type)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
-        lhs_(LhsRegister(RegisterBits::decode(key))),
-        rhs_(RhsRegister(RegisterBits::decode(key))),
-        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
-        runtime_operands_type_(type_info),
-        name_(NULL) { }
+        operand_type_(operand_type),
+        name_(NULL) {
+  }
 
  private:
   Token::Value op_;
-  OverwriteMode mode_;
-  Register lhs_;
-  Register rhs_;
-  int constant_rhs_;
-  bool specialized_on_rhs_;
-  BinaryOpIC::TypeInfo runtime_operands_type_;
+  UnaryOverwriteMode mode_;
+
+  // Operand type information determined at runtime.
+  UnaryOpIC::TypeInfo operand_type_;
+
   char* name_;
 
-  static const int kMaxKnownRhs = 0x40000000;
-  static const int kKnownRhsKeyBits = 6;
-
-  // Minor key encoding in 16 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 6> {};
-  class TypeInfoBits: public BitField<int, 8, 3> {};
-  class RegisterBits: public BitField<bool, 11, 1> {};
-  class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
-           (lhs_.is(a1) && rhs_.is(a0)));
-    // Encode the parameters in a unique 16 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | KnownIntBits::encode(MinorKeyForKnownInt())
-           | TypeInfoBits::encode(runtime_operands_type_)
-           | RegisterBits::encode(lhs_.is(a0));
-  }
-
-  void Generate(MacroAssembler* masm);
-  void HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                             Register lhs,
-                             Register rhs);
-  void HandleBinaryOpSlowCases(MacroAssembler* masm,
-                               Label* not_smi,
-                               Register lhs,
-                               Register rhs,
-                               const Builtins::JavaScript& builtin);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
-    if (constant_rhs == kUnknownIntValue) return false;
-    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
-    if (op == Token::MOD) {
-      if (constant_rhs <= 1) return false;
-      if (constant_rhs <= 10) return true;
-      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
-      return false;
-    }
-    return false;
-  }
-
-  int MinorKeyForKnownInt() {
-    if (!specialized_on_rhs_) return 0;
-    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
-    ASSERT(IsPowerOf2(constant_rhs_));
-    int key = 12;
-    int d = constant_rhs_;
-    while ((d & 1) == 0) {
-      key++;
-      d >>= 1;
-    }
-    ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
-    return key;
-  }
-
-  int KnownBitsForMinorKey(int key) {
-    if (!key) return 0;
-    if (key <= 11) return key - 1;
-    int d = 1;
-    while (key != 12) {
-      key--;
-      d <<= 1;
-    }
-    return d;
-  }
-
-  Register LhsRegister(bool lhs_is_a0) {
-    return lhs_is_a0 ? a0 : a1;
-  }
-
-  Register RhsRegister(bool lhs_is_a0) {
-    return lhs_is_a0 ? a1 : a0;
-  }
-
-  bool HasSmiSmiFastPath() {
-    return op_ != Token::DIV;
-  }
-
-  bool ShouldGenerateSmiCode() {
-    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
   const char* GetName();
 
-  virtual void FinishCode(Code* code) {
-    code->set_binary_op_type(runtime_operands_type_);
-  }
-
 #ifdef DEBUG
   void Print() {
-    if (!specialized_on_rhs_) {
-      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
-    } else {
-      PrintF("GenericBinaryOpStub (%s by %d)\n",
-             Token::String(op_),
-             constant_rhs_);
-    }
+    PrintF("UnaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           UnaryOpIC::GetName(operand_type_));
   }
 #endif
-};
 
-class TypeRecordingBinaryOpStub: public CodeStub {
- public:
-  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
-      : op_(op),
-        mode_(mode),
-        operands_type_(TRBinaryOpIC::UNINITIALIZED),
-        result_type_(TRBinaryOpIC::UNINITIALIZED),
-        name_(NULL) {
-    UNIMPLEMENTED_MIPS();
+  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class OpBits: public BitField<Token::Value, 1, 7> {};
+  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+  Major MajorKey() { return UnaryOp; }
+  int MinorKey() {
+    return ModeBits::encode(mode_)
+           | OpBits::encode(op_)
+           | OperandTypeInfoBits::encode(operand_type_);
   }
 
-  TypeRecordingBinaryOpStub(
+  // Note: a lot of the helper functions below will vanish when we use
+  // virtual functions instead of switch statements more often.
+  void Generate(MacroAssembler* masm);
+
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateSmiStubSub(MacroAssembler* masm);
+  void GenerateSmiStubBitNot(MacroAssembler* masm);
+  void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+  void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateHeapNumberStubSub(MacroAssembler* masm);
+  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateGenericStubSub(MacroAssembler* masm);
+  void GenerateGenericStubBitNot(MacroAssembler* masm);
+  void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return UnaryOpIC::ToState(operand_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_unary_op_type(operand_type_);
+  }
+};
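
The MinorKey layouts above and below all lean on V8's BitField template. A
self-contained sketch of that encode/decode scheme, instantiated with the
UnaryOpStub layout of 1 mode bit, 7 op bits, and 3 type-info bits:

    #include <cstdint>

    template <class T, int kShift, int kSize>
    struct BitFieldSketch {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key >> kShift) & ((1u << kSize) - 1));
      }
    };

    typedef BitFieldSketch<int, 0, 1> ModeBitsSketch;      // overwrite mode
    typedef BitFieldSketch<int, 1, 7> OpBitsSketch;        // Token::Value
    typedef BitFieldSketch<int, 8, 3> TypeInfoBitsSketch;  // operand type

    // A key round-trips: OpBitsSketch::decode(OpBitsSketch::encode(op)) == op.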
+
+
+class BinaryOpStub: public CodeStub {
+ public:
+  BinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(BinaryOpIC::UNINITIALIZED),
+        result_type_(BinaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+    use_fpu_ = CpuFeatures::IsSupported(FPU);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  BinaryOpStub(
       int key,
-      TRBinaryOpIC::TypeInfo operands_type,
-      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      BinaryOpIC::TypeInfo operands_type,
+      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         use_fpu_(FPUBits::decode(key)),
@@ -251,8 +192,8 @@
   bool use_fpu_;
 
   // Operand type information determined at runtime.
-  TRBinaryOpIC::TypeInfo operands_type_;
-  TRBinaryOpIC::TypeInfo result_type_;
+  BinaryOpIC::TypeInfo operands_type_;
+  BinaryOpIC::TypeInfo result_type_;
 
   char* name_;
 
@@ -260,12 +201,12 @@
 
 #ifdef DEBUG
   void Print() {
-    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+    PrintF("BinaryOpStub %d (op %s), "
            "(mode %d, runtime_type_info %s)\n",
            MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
-           TRBinaryOpIC::GetName(operands_type_));
+           BinaryOpIC::GetName(operands_type_));
   }
 #endif
 
@@ -273,10 +214,10 @@
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class FPUBits: public BitField<bool, 9, 1> {};
-  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
-  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
 
-  Major MajorKey() { return TypeRecordingBinaryOp; }
+  Major MajorKey() { return BinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -293,6 +234,7 @@
                            Label* not_numbers,
                            Label* gc_required);
   void GenerateSmiCode(MacroAssembler* masm,
+                       Label* use_runtime,
                        Label* gc_required,
                        SmiCodeGenerateHeapNumberResults heapnumber_results);
   void GenerateLoadArguments(MacroAssembler* masm);
@@ -301,7 +243,9 @@
   void GenerateSmiStub(MacroAssembler* masm);
   void GenerateInt32Stub(MacroAssembler* masm);
   void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
   void GenerateCallRuntime(MacroAssembler* masm);
@@ -316,15 +260,15 @@
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
 
-  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return TRBinaryOpIC::ToState(operands_type_);
+    return BinaryOpIC::ToState(operands_type_);
   }
 
   virtual void FinishCode(Code* code) {
-    code->set_type_recording_binary_op_type(operands_type_);
-    code->set_type_recording_binary_op_result_type(result_type_);
+    code->set_binary_op_type(operands_type_);
+    code->set_binary_op_result_type(result_type_);
   }
 
   friend class CodeGenerator;
@@ -334,24 +278,36 @@
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
-  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+  // Omit left string check in stub (left is definitely a string).
+  NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+  // Omit right string check in stub (right is definitely a string).
+  NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+  // Omit both string checks in stub.
+  NO_STRING_CHECK_IN_STUB =
+      NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
 };
 
 
 class StringAddStub: public CodeStub {
  public:
-  explicit StringAddStub(StringAddFlags flags) {
-    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
-  }
+  explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
  private:
   Major MajorKey() { return StringAdd; }
-  int MinorKey() { return string_check_ ? 0 : 1; }
+  int MinorKey() { return flags_; }
 
   void Generate(MacroAssembler* masm);
 
-  // Should the stub check whether arguments are strings?
-  bool string_check_;
+  void GenerateConvertArgument(MacroAssembler* masm,
+                               int stack_offset,
+                               Register arg,
+                               Register scratch1,
+                               Register scratch2,
+                               Register scratch3,
+                               Register scratch4,
+                               Label* slow);
+
+  const StringAddFlags flags_;
 };
 
 
@@ -372,7 +328,6 @@
   StringCompareStub() { }
 
   // Compare two flat ASCII strings and returns result in v0.
-  // Does not use the stack.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -381,11 +336,28 @@
                                               Register scratch3,
                                               Register scratch4);
 
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns the result
+  // in v0 (sketched in plain C++ after this class).
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3);
 
-  void Generate(MacroAssembler* masm);
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register length,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            Label* chars_not_equal);
 };
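
A plain C++ model of what GenerateFlatAsciiStringEquals and
GenerateAsciiCharsCompareLoop do once both operands are known to be flat ASCII
strings (register allocation and the stub's fast paths elided):

    #include <cstddef>

    // Lengths are compared first; the loop then bails out at the first
    // differing character (the chars_not_equal label in the stub).
    bool FlatAsciiEqualsSketch(const char* left, size_t left_len,
                               const char* right, size_t right_len) {
      if (left_len != right_len) return false;
      for (size_t i = 0; i < left_len; i++) {
        if (left[i] != right[i]) return false;
      }
      return true;
    }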
 
 
@@ -484,26 +456,225 @@
   const char* GetName() { return "RegExpCEntryStub"; }
 };
 
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of a compacting GC (which can move code objects), we need
+// to keep the code that called into native code pinned in memory. Currently
+// the simplest approach is to generate such a stub early enough that it can
+// never be moved by the GC.
+class DirectCEntryStub: public CodeStub {
+ public:
+  DirectCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+  void GenerateCall(MacroAssembler* masm,
+                    ExternalReference function);
+  void GenerateCall(MacroAssembler* masm, Register target);
 
-// Generate code the to load an element from a pixel array. The receiver is
-// assumed to not be a smi and to have elements, the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated . If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
-                                Register receiver,
-                                Register key,
-                                Register elements_map,
-                                Register elements,
+ private:
+  Major MajorKey() { return DirectCEntry; }
+  int MinorKey() { return 0; }
+
+  bool NeedsImmovableCode() { return true; }
+
+  const char* GetName() { return "DirectCEntryStub"; }
+};
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+  enum Destination {
+    kFPURegisters,
+    kCoreRegisters
+  };
+
+  // Loads smis from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 will be
+  // scratched.
+  static void LoadSmis(MacroAssembler* masm,
+                       Destination destination,
+                       Register scratch1,
+                       Register scratch2);
+
+  // Loads objects from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 will still be
+  // scratched. If either a0 or a1 is not a number (neither a smi nor a heap
+  // number object), the not_number label is jumped to with a0 and a1 intact.
+  static void LoadOperands(MacroAssembler* masm,
+                           FloatingPointHelper::Destination destination,
+                           Register heap_number_map,
+                           Register scratch1,
+                           Register scratch2,
+                           Label* not_number);
+
+  // Convert the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript section 9.5: the value is
+  // truncated and brought into the range -2^31 .. +2^31 - 1. (A sketch of
+  // this conversion follows the class.)
+  static void ConvertNumberToInt32(MacroAssembler* masm,
+                                   Register object,
+                                   Register dst,
+                                   Register heap_number_map,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   FPURegister double_scratch,
+                                   Label* not_int32);
+
+  // Converts the integer (untagged smi) in |int_scratch| to a double, storing
+  // the result either in |double_dst| or |dst2:dst1|, depending on
+  // |destination|.
+  // Warning: The value in |int_scratch| will be changed in the process!
+  static void ConvertIntToDouble(MacroAssembler* masm,
+                                 Register int_scratch,
+                                 Destination destination,
+                                 FPURegister double_dst,
+                                 Register dst1,
+                                 Register dst2,
+                                 Register scratch2,
+                                 FPURegister single_scratch);
+
+  // Load the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+  static void LoadNumberAsInt32Double(MacroAssembler* masm,
+                                      Register object,
+                                      Destination destination,
+                                      FPURegister double_dst,
+                                      Register dst1,
+                                      Register dst2,
+                                      Register heap_number_map,
+                                      Register scratch1,
+                                      Register scratch2,
+                                      FPURegister single_scratch,
+                                      Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+  // scratch3 is not used when FPU is supported.
+  static void LoadNumberAsInt32(MacroAssembler* masm,
+                                Register object,
+                                Register dst,
+                                Register heap_number_map,
                                 Register scratch1,
                                 Register scratch2,
-                                Register result,
-                                Label* not_pixel_array,
-                                Label* key_not_smi,
-                                Label* out_of_range);
+                                Register scratch3,
+                                FPURegister double_scratch,
+                                Label* not_int32);
+
+  // Generate non-FPU code to check if a double can be exactly represented
+  // by a 32-bit integer. This does not check for 0 or -0, which need
+  // to be checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and
+  // falls through otherwise.
+  // src1 and src2 will be clobbered.
+  //
+  // Expected input:
+  // - src1: higher (exponent) part of the double value.
+  // - src2: lower (mantissa) part of the double value.
+  // Output status:
+  // - dst: the 32 higher bits of the mantissa (mantissa[51:20]).
+  // - src2: contains 1.
+  // - other registers are clobbered.
+  static void DoubleIs32BitInteger(MacroAssembler* masm,
+                                   Register src1,
+                                   Register src2,
+                                   Register dst,
+                                   Register scratch,
+                                   Label* not_int32);
+
+  // Generates code to call a C function to do a double operation using core
+  // registers. (Used when FPU is not supported.)
+  // This code never falls through, but returns with a heap number containing
+  // the result in v0.
+  // Register heap_number_result must be a heap number in which the
+  // result of the operation will be stored.
+  // Requires the following layout on entry:
+  // a0: Left value (least significant part of mantissa).
+  // a1: Left value (sign, exponent, top of mantissa).
+  // a2: Right value (least significant part of mantissa).
+  // a3: Right value (sign, exponent, top of mantissa).
+  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+                                          Token::Value op,
+                                          Register heap_number_result,
+                                          Register scratch);
+
+ private:
+  static void LoadNumber(MacroAssembler* masm,
+                         FloatingPointHelper::Destination destination,
+                         Register object,
+                         FPURegister dst,
+                         Register dst1,
+                         Register dst2,
+                         Register heap_number_map,
+                         Register scratch1,
+                         Register scratch2,
+                         Label* not_number);
+};
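
ConvertNumberToInt32 above implements ECMAScript's ToInt32 (section 9.5). A
host-side C++ sketch of that truncate-and-wrap rule, independent of the FPU
and core-register plumbing; two's-complement wrap is assumed for the final
cast:

    #include <cmath>
    #include <cstdint>

    // NaN and infinity map to 0; everything else is truncated toward zero
    // and reduced modulo 2^32 into [-2^31, 2^31).
    int32_t ToInt32Sketch(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double truncated = std::trunc(value);
      double wrapped = std::fmod(truncated, 4294967296.0);  // mod 2^32
      if (wrapped < 0) wrapped += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
    }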
+
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+  void Generate(MacroAssembler* masm);
+
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register receiver,
+      Register properties,
+      String* name,
+      Register scratch0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("StringDictionaryLookupStub\n");
+  }
+#endif
+
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+  int MinorKey() {
+    return LookupModeBits::encode(mode_);
+  }
+
+  class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+  LookupMode mode_;
+};
 
 
 } }  // namespace v8::internal
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
deleted file mode 100644
index be9ae9e..0000000
--- a/src/mips/codegen-mips-inl.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
-#define V8_MIPS_CODEGEN_MIPS_INL_H_
-
-#include "virtual-frame-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() {
-  __ b(&entry_label_);
-  __ nop();
-}
-
-
-// Note: this has been hacked for submisson. Mips branches require two
-//  additional operands: Register src1, const Operand& src2.
-void DeferredCode::Branch(Condition cond) {
-  __ Branch(&entry_label_, cond, zero_reg, Operand(0));
-}
-
-
-void Reference::GetValueAndSpill() {
-  GetValue();
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_MIPS_CODEGEN_MIPS_INL_H_
-
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index c1149df..4400b64 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,61 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 #include "v8.h"
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-mips-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  // On MIPS you either have a completely spilled frame or you
-  // handle it yourself, but at the moment there's no automation
-  // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -90,1124 +47,6 @@
 }
 
 
-// -----------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      previous_(owner->state()) {
-  owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
-                           JumpTarget* true_target,
-                           JumpTarget* false_target)
-    : CodeGenState(owner),
-      true_target_(true_target),
-      false_target_(false_target) {
-  owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
-                                           Slot* slot,
-                                           TypeInfo type_info)
-    : CodeGenState(owner),
-      slot_(slot) {
-  owner->set_state(this);
-  old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
-  owner()->set_type_info(slot_, old_type_info_);
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      cc_reg_(cc_always),
-      state_(NULL),
-      loop_nesting_(0),
-      type_info_(NULL),
-      function_return_(JumpTarget::BIDIRECTIONAL),
-      function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// a1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
-  UNIMPLEMENTED_MIPS();
-  return 0;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
-    Slot* slot,
-    Register tmp,
-    Register tmp2,
-    JumpTarget* slow) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-void CodeGenerator::LoadCondition(Expression* x,
-                                  JumpTarget* true_target,
-                                  JumpTarget* false_target,
-                                  bool force_cc) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Load(Expression* x) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  UNIMPLEMENTED_MIPS();
-  return EAGER_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::~Reference() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
-                              JumpTarget* false_target) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           OverwriteMode overwrite_mode,
-                                           GenerateInlineSmi inline_smi,
-                                           int constant_rhs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             int value,
-                             bool reversed,
-                             OverwriteMode overwrite_mode,
-                             Register tos)
-      : op_(op),
-        value_(value),
-        reversed_(reversed),
-        overwrite_mode_(overwrite_mode),
-        tos_register_(tos) {
-    set_comment("[ DeferredInlinedSmiOperation");
-  }
-
-  virtual void Generate();
-  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
-  // Exit(). Currently on MIPS SaveRegisters() and RestoreRegisters() are empty
-  // methods, it is the responsibility of the deferred code to save and restore
-  // registers.
-  virtual bool AutoSaveAndRestore() { return false; }
-
-  void JumpToNonSmiInput(Condition cond, Register cmp1, const Operand& cmp2);
-  void JumpToAnswerOutOfRange(Condition cond,
-                              Register cmp1,
-                              const Operand& cmp2);
-
- private:
-  void GenerateNonSmiInput();
-  void GenerateAnswerOutOfRange();
-  void WriteNonSmiAnswer(Register answer,
-                         Register heap_number,
-                         Register scratch);
-
-  Token::Value op_;
-  int value_;
-  bool reversed_;
-  OverwriteMode overwrite_mode_;
-  Register tos_register_;
-  Label non_smi_input_;
-  Label answer_out_of_range_;
-};
-
-
-// For bit operations we try harder and handle the case where the input is not
-// a Smi but a 32bits integer without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond,
-                                                   Register cmp1,
-                                                   const Operand& cmp2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// For bit operations the result is always 32bits so we handle the case where
-// the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond,
-                                                        Register cmp1,
-                                                        const Operand& cmp2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is nowhere.  The tos_register_ is not used by the
-// virtual frame.  On exit the answer is in the tos_register_ and the virtual
-// frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
-                                                   Register heap_number,
-                                                   Register scratch) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
-                                 Handle<Object> value,
-                                 bool reversed,
-                                 OverwriteMode mode) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// On MIPS we load registers condReg1 and condReg2 with the values which should
-// be compared. With the CodeGenerator::cc_reg_ condition, functions will be
-// able to evaluate correctly the condition. (eg CodeGenerator::Branch)
-void CodeGenerator::Comparison(Condition cc,
-                               Expression* left,
-                               Expression* right,
-                               bool strict) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CheckStack() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                                      TypeofState typeof_state,
-                                                      JumpTarget* slow) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
-  DeferredSearchCache(Register dst, Register cache, Register key)
-      : dst_(dst), cache_(cache), key_(key) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
-  DeferredCountOperation(Register value,
-                         bool is_increment,
-                         bool is_postfix,
-                         int target_size)
-      : value_(value),
-        is_increment_(is_increment),
-        is_postfix_(is_postfix),
-        target_size_(target_size) {}
-
-  virtual void Generate() {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  Register value_;
-  bool is_increment_;
-  bool is_postfix_;
-  int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  explicit DeferredReferenceGetNamedValue(Register receiver,
-                                          Handle<String> name,
-                                          bool is_contextual)
-      : receiver_(receiver),
-        name_(name),
-        is_contextual_(is_contextual),
-        is_dont_delete_(false) {
-    set_comment(is_contextual
-                ? "[ DeferredReferenceGetNamedValue (contextual)"
-                : "[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  void set_is_dont_delete(bool value) {
-    ASSERT(is_contextual_);
-    is_dont_delete_ = value;
-  }
-
- private:
-  Register receiver_;
-  Handle<String> name_;
-  bool is_contextual_;
-  bool is_dont_delete_;
-};
-
-
-
-void DeferredReferenceGetNamedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceGetKeyedValue(Register key, Register receiver)
-      : key_(key), receiver_(receiver) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register key_;
-  Register receiver_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver)
-      : value_(value), key_(key), receiver_(receiver) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceSetNamedValue(Register value,
-                                 Register receiver,
-                                 Handle<String> name)
-      : value_(value), receiver_(receiver), name_(name) {
-    set_comment("[ DeferredReferenceSetNamedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register receiver_;
-  Handle<String> name_;
-};
-
-
-void DeferredReferenceSetNamedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedLoad() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
-                                   WriteBarrierCharacter wb_info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-// -----------------------------------------------------------------------------
-// Reference support.
-
-
-Handle<String> Reference::GetName() {
-  UNIMPLEMENTED_MIPS();
-  return Handle<String>();
-}
-
-
-void Reference::DupIfPersist() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::GetValue() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
-  UNIMPLEMENTED_MIPS();
-  return name_;
-}
-
-
-#undef __
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 0a2cd45..fecd321 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,204 +37,16 @@
 namespace v8 {
 namespace internal {
 
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
-// Use floating-point coprocessor instructions. This flag is raised when
-// -mhard-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = false;
-#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
-// Not using floating-point coprocessor instructions. This flag is raised when
-// -msoft-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = true;
-#else
-static const bool IsMipsSoftFloatABI = true;
-#endif
-
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types is important, see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
-    return (type_ < SLOT) ? 0 : type_;
-  }
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name. Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Generate code to pop a reference, push the value of the reference,
-  // and then spill the stack frame.
-  inline void GetValueAndSpill();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The  value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
-  // This is in preparation for something that uses the reference on the stack.
-  // If we need this reference afterwards get then dup it now.  Otherwise mark
-  // it as used.
-  inline void DupIfPersist();
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  // Keep the reference on the stack after get, so it can be used by set later.
-  bool persist_after_get_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair).  It is threaded through the
-// call stack.  Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  virtual ~CodeGenState();
-
-  virtual JumpTarget* true_target() const { return NULL; }
-  virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
-  inline CodeGenerator* owner() { return owner_; }
-  inline CodeGenState* previous() const { return previous_; }
-
- private:
-  // The owning code generator.
-  CodeGenerator* owner_;
-
-
-
-  // The previous state of the owning code generator, restored when
-  // this state is destroyed.
-  CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own pair of branch labels.
-  ConditionCodeGenState(CodeGenerator* owner,
-                        JumpTarget* true_target,
-                        JumpTarget* false_target);
-
-  virtual JumpTarget* true_target() const { return true_target_; }
-  virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
-  TypeInfoCodeGenState(CodeGenerator* owner,
-                       Slot* slot_number,
-                       TypeInfo info);
-  virtual ~TypeInfoCodeGenState();
-
-  virtual JumpTarget* true_target() const { return previous()->true_target(); }
-  virtual JumpTarget* false_target() const {
-    return previous()->false_target();
-  }
-
- private:
-  Slot* slot_;
-  TypeInfo old_type_info_;
-};
-
 
 // -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -----------------------------------------------------------------------------
 // CodeGenerator
 
 class CodeGenerator: public AstVisitor {
  public:
-  // Compilation mode.  Either the compiler is used as the primary
-  // compiler and needs to setup everything or the compiler is used as
-  // the secondary compiler for split compilation and has to handle
-  // bailouts.
-  enum Mode {
-    PRIMARY,
-    SECONDARY
-  };
-
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -261,50 +73,14 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
-
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  TypeInfo type_info(Slot* slot) {
-    int index = NumberOfSlot(slot);
-    if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
-    return (*type_info_)[index];
-  }
-
-  TypeInfo set_type_info(Slot* slot, TypeInfo info) {
-    int index = NumberOfSlot(slot);
-    ASSERT(index >= kInvalidSlotNumber);
-    if (index != kInvalidSlotNumber) {
-      TypeInfo previous_value = (*type_info_)[index];
-      (*type_info_)[index] = info;
-      return previous_value;
-    }
-    return TypeInfo::Unknown();
-  }
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
   // Constants related to patching of inlined load/store.
   static int GetInlinedKeyedLoadInstructionsAfterPatch() {
     // This is in correlation with the padding in MacroAssembler::Abort.
     return FLAG_debug_code ? 45 : 20;
   }
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 9;
+
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
+
   static int GetInlinedNamedStoreInstructionsAfterPatch() {
     ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
     // Magic number 5: instruction count after patched map load:
@@ -313,317 +89,6 @@
   }
 
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-
-  // Construction/Destruction.
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors.
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  static const int kInvalidSlotNumber = -1;
-
-  int NumberOfSlot(Slot* slot);
-  // State
-  bool has_cc() const { return cc_reg_ != cc_always; }
-
-  JumpTarget* true_target() const { return state_->true_target(); }
-  JumpTarget* false_target() const { return state_->false_target(); }
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which can not be done more than once).  The return value should
-  // be in v0.
-  void GenerateReturnSequence();
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  void StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-  void UnloadReference(Reference* ref);
-
-  MemOperand SlotOperand(Slot* slot, Register tmp);
-
-  MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
-                                               Register tmp,
-                                               Register tmp2,
-                                               JumpTarget* slow);
-
-  void LoadCondition(Expression* x,
-                     JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool force_cc);
-  void Load(Expression* x);
-  void LoadGlobal();
-  void LoadGlobalReceiver(Register scratch);
-
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  // Store a keyed property. Key and receiver are on the stack and the value is
-  // in a0. Result is returned in r0.
-  void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                         TypeofState typeof_state,
-                                         JumpTarget* slow);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Store the value on top of the stack to a slot.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Load a named property, returning it in v0. The receiver is passed on the
-  // stack, and remains there.
-  void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // Store to a named property. If the store is contextual, value is passed on
-  // the frame and consumed. Otherwise, receiver and value are passed on the
-  // frame and consumed. The result is returned in v0.
-  void EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Load a keyed property, leaving it in v0. The receiver and key are
-  // passed on the stack, and remain there.
-  void EmitKeyedLoad();
-
-  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(Token::Value op,
-                              OverwriteMode overwrite_mode,
-                              GenerateInlineSmi inline_smi,
-                              int known_rhs =
-                                GenericBinaryOpStub::kUnknownIntValue);
-
-  void VirtualFrameBinaryOperation(Token::Value op,
-                                   OverwriteMode overwrite_mode,
-                                   int known_rhs =
-                                      GenericBinaryOpStub::kUnknownIntValue);
-
-  void SmiOperation(Token::Value op,
-                    Handle<Object> value,
-                    bool reversed,
-                    OverwriteMode mode);
-
-  void Comparison(Condition cc,
-                  Expression* left,
-                  Expression* right,
-                  bool strict = false);
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  // Control flow
-  void Branch(bool if_true, JumpTarget* target);
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  static Handle<Code> ComputeLazyCompile(int argc);
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                           bool pretenure);
-
-  // Support for type checks.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-  void GenerateSubString(ZoneList<Expression*>* args);
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to indicate which source code is generated for. Source
-  // positions are collected by the assembler and emitted with the relocation
-  // information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* node);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.
-  bool HasValidEntryRegisters();
-#endif
-
-  List<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  Condition cc_reg_;
-  CodeGenState* state_;
-  int loop_nesting_;
-
-  Vector<TypeInfo>* type_info_;
-  // Jump targets
-  BreakTarget function_return_;
-
-  // True if the function return is shadowed (ie, jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-  friend class InlineRuntimeFunctionsTable;
-  friend class LCodeGen;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
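
The patch-size constants kept in this header only make sense because MIPS32 instructions are a fixed four bytes, so a patch site's extent can be described as an instruction count. A minimal standalone sketch of that arithmetic; the names are illustrative, not part of V8's API:

#include <cassert>
#include <cstdint>

// Every MIPS32 instruction is 32 bits wide (see constants-mips.h below).
static const int kInstrSize = 4;

// Count the instructions emitted between two code addresses; a code
// generator can assert this equals an advertised constant such as
// kInlinedKeyedStoreInstructionsAfterPatch before a patcher relies on it.
static int InstructionsBetween(uintptr_t start_pc, uintptr_t end_pc) {
  assert(end_pc >= start_pc && (end_pc - start_pc) % kInstrSize == 0);
  return static_cast<int>((end_pc - start_pc) / kInstrSize);
}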
 
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index 16e49c9..96a2333 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,7 +36,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Registers
+// Registers.
 
 
 // These register names are defined in a way to match the native disassembler
@@ -145,7 +145,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Instruction
+// Instructions.
 
 bool Instruction::IsForbiddenInBranchDelay() const {
   const int op = OpcodeFieldRaw();
@@ -293,15 +293,15 @@
           UNREACHABLE();
       };
       break;
-    case COP1:    // Coprocessor instructions
+    case COP1:    // Coprocessor instructions.
       switch (RsFieldRawNoAssert()) {
-        case BC1:   // branch on coprocessor condition
+        case BC1:   // Branch on coprocessor condition.
           return kImmediateType;
         default:
           return kRegisterType;
       };
       break;
-    // 16 bits Immediate type instructions. eg: addi dest, src, imm16
+    // 16-bit immediate type instructions, e.g. addi dest, src, imm16.
     case REGIMM:
     case BEQ:
     case BNE:
@@ -336,7 +336,7 @@
     case SWC1:
     case SDC1:
       return kImmediateType;
-    // 26 bits immediate type instructions. eg: j imm26
+    // 26-bit immediate type instructions, e.g. j imm26.
     case J:
     case JAL:
       return kJumpType;
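
The classification switch patched above keys entirely off the 6-bit major opcode in bits 31..26. A simplified sketch of that decode step, using architectural opcode values (J = 2, JAL = 3) rather than V8's pre-shifted enum constants, and collapsing the immediate cases the real switch spells out:

#include <cstdint>

enum InstructionType { kRegisterType, kImmediateType, kJumpType };

static const int kOpcodeShift = 26;            // Major opcode: bits 31..26.
static const uint32_t kOpcodeBitsMask = 0x3F;  // Six opcode bits.

static InstructionType ClassifyByOpcode(uint32_t instr) {
  switch ((instr >> kOpcodeShift) & kOpcodeBitsMask) {
    case 2:  // j   imm26
    case 3:  // jal imm26
      return kJumpType;
    case 0:  // SPECIAL: the funct field selects a register-type operation.
      return kRegisterType;
    default: // Approximation: most remaining opcodes carry a 16-bit immediate.
      return kImmediateType;
  }
}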
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index b20e9a2..2567330 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -47,6 +47,19 @@
 #endif
 
 
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+// Use floating-point coprocessor instructions. This flag is raised when
+// -mhard-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = false;
+#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+// Not using floating-point coprocessor instructions. This flag is raised when
+// -msoft-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = true;
+#else
+static const bool IsMipsSoftFloatABI = true;
+#endif
+
+
 // Defines constants and accessor classes to assemble, disassemble and
 // simulate MIPS32 instructions.
 //
@@ -58,7 +71,7 @@
 namespace internal {
 
 // -----------------------------------------------------------------------------
-// Registers and FPURegister.
+// Registers and FPURegisters.
 
 // Number of general purpose registers.
 static const int kNumRegisters = 32;
@@ -82,6 +95,11 @@
 // FCSR constants.
 static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
 static const uint32_t kFCSRFlagShift = 2;
+static const uint32_t kFCSRInexactFlagBit = 1 << 0;
+static const uint32_t kFCSRUnderflowFlagBit = 1 << 1;
+static const uint32_t kFCSROverflowFlagBit = 1 << 2;
+static const uint32_t kFCSRDivideByZeroFlagBit = 1 << 3;
+static const uint32_t kFCSRInvalidOpFlagBit = 1 << 4;
 
 // Helper functions for converting between register numbers and names.
 class Registers {
@@ -133,8 +151,6 @@
 // On MIPS all instructions are 32 bits.
 typedef int32_t Instr;
 
-typedef unsigned char byte_;
-
 // Special Software Interrupt codes when used in the presence of the MIPS
 // simulator.
 enum SoftwareInterruptCodes {
@@ -175,7 +191,7 @@
 static const int kFBtrueShift   = 16;
 static const int kFBtrueBits    = 1;
 
-// ----- Miscellianous useful masks.
+// ----- Miscellaneous useful masks.
 // Instruction bit masks.
 static const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
 static const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
@@ -215,7 +231,7 @@
   XORI      =   ((1 << 3) + 6) << kOpcodeShift,
   LUI       =   ((1 << 3) + 7) << kOpcodeShift,
 
-  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class
+  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class.
   BEQL      =   ((2 << 3) + 4) << kOpcodeShift,
   BNEL      =   ((2 << 3) + 5) << kOpcodeShift,
   BLEZL     =   ((2 << 3) + 6) << kOpcodeShift,
@@ -393,7 +409,7 @@
 
   cc_always     = 16,
 
-  // aliases
+  // Aliases.
   carry         = Uless,
   not_carry     = Ugreater_equal,
   zero          = equal,
@@ -455,14 +471,14 @@
 
 // ----- Coprocessor conditions.
 enum FPUCondition {
-  F,    // False
-  UN,   // Unordered
-  EQ,   // Equal
-  UEQ,  // Unordered or Equal
-  OLT,  // Ordered or Less Than
-  ULT,  // Unordered or Less Than
-  OLE,  // Ordered or Less Than or Equal
-  ULE   // Unordered or Less Than or Equal
+  F,    // False.
+  UN,   // Unordered.
+  EQ,   // Equal.
+  UEQ,  // Unordered or Equal.
+  OLT,  // Ordered or Less Than.
+  ULT,  // Unordered or Less Than.
+  OLE,  // Ordered or Less Than or Equal.
+  ULE   // Unordered or Less Than or Equal.
 };
 
 
@@ -494,7 +510,7 @@
 extern const Instr kPushInstruction;
 // sw(r, MemOperand(sp, 0))
 extern const Instr kPushRegPattern;
-//  lw(r, MemOperand(sp, 0))
+// lw(r, MemOperand(sp, 0))
 extern const Instr kPopRegPattern;
 extern const Instr kLwRegFpOffsetPattern;
 extern const Instr kSwRegFpOffsetPattern;
@@ -687,7 +703,7 @@
   // reference to an instruction is to convert a pointer. There is no way
   // to allocate or create instances of class Instruction.
   // Use the At(pc) function to create references to Instruction.
-  static Instruction* At(byte_* pc) {
+  static Instruction* At(byte* pc) {
     return reinterpret_cast<Instruction*>(pc);
   }
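
The FCSR flag bits added above are defined relative to the extracted flag field, not to the raw control register, which is why kFCSRFlagShift sits alongside them. A hedged sketch of the intended usage; the raw value would come from a cfc1 read of FPU control register 31, and the helper name is illustrative:

#include <cstdint>

// Constants as introduced in the hunk above.
static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
static const uint32_t kFCSRFlagShift = 2;
static const uint32_t kFCSROverflowFlagBit = 1 << 2;
static const uint32_t kFCSRInvalidOpFlagBit = 1 << 4;

// True if the last FPU operation overflowed or was invalid.
static bool FpuResultIsSuspect(uint32_t raw_fcsr) {
  uint32_t flags = (raw_fcsr >> kFCSRFlagShift) & kFCSRFlagMask;
  return (flags & (kFCSROverflowFlagBit | kFCSRInvalidOpFlagBit)) != 0;
}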
 
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 36f577b..26e95fb 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -48,19 +48,25 @@
 
 
 void CPU::Setup() {
-  CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
-  cpu_features->Probe(true);
-  if (!cpu_features->IsSupported(FPU) || Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return CpuFeatures::IsSupported(FPU);
 }
 
 
 void CPU::FlushICache(void* start, size_t size) {
+  // Nothing to do when there are no instructions to flush.
+  if (size == 0) {
+    return;
+  }
+
 #if !defined (USE_SIMULATOR)
   int res;
 
-  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
+  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
   res = syscall(__NR_cacheflush, start, size, ICACHE);
 
   if (res) {
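
For reference, the shape of the patched FlushICache as a standalone function: skip the syscall for empty ranges and treat failure as fatal, since executing stale instructions is unrecoverable. This sketch assumes a Linux/MIPS host, where __NR_cacheflush and the ICACHE constant are available:

#include <stddef.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/cachectl.h>  // ICACHE / DCACHE / BCACHE; Linux/MIPS only.

static void FlushInstructionCache(void* start, size_t size) {
  if (size == 0) return;  // Flushing zero bytes is a no-op; skip the syscall.
  if (syscall(__NR_cacheflush, start, size, ICACHE) != 0) {
    abort();  // Stale instructions would be executed otherwise.
  }
}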
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index 35df69b..e323c50 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 
 namespace v8 {
@@ -40,106 +40,259 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 bool BreakLocationIterator::IsDebugBreakAtReturn() {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  return Debug::IsDebugBreakAtReturn(rinfo());
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtReturn() {
-  UNIMPLEMENTED_MIPS();
+  // MIPS return sequence:
+  // mov sp, fp
+  // lw fp, sp(0)
+  // lw ra, sp(4)
+  // addiu sp, sp, 8
+  // addiu sp, sp, N
+  // jr ra
+  // nop (in branch delay slot)
+
+  // Make sure this constant matches the number of instructions we emit.
+  ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
+  CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+  // li and Call pseudo-instructions emit two instructions each.
+  patcher.masm()->li(v8::internal::t9,
+      Operand(reinterpret_cast<int32_t>(
+          Isolate::Current()->debug()->debug_break_return()->entry())));
+  patcher.masm()->Call(v8::internal::t9);
+  patcher.masm()->nop();
+  patcher.masm()->nop();
+  patcher.masm()->nop();
+
+  // TODO(mips): Open issue about using breakpoint instruction instead of nops.
+  // patcher.masm()->bkpt(0);
 }
 
 
 // Restore the JS frame exit code.
 void BreakLocationIterator::ClearDebugBreakAtReturn() {
-  UNIMPLEMENTED_MIPS();
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kJSReturnSequenceInstructions);
 }
 
 
 // A debug break in the exit code is identified by the JS frame exit code
-// having been patched with li/call psuedo-instrunction (liu/ori/jalr)
+// having been patched with a li/call pseudo-instruction pair (lui/ori/jalr).
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsPatchedReturnSequence();
 }
 
 
 bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  ASSERT(IsDebugBreakSlot());
+  // Check whether the debug break slot instructions have been patched.
+  return rinfo()->IsPatchedDebugBreakSlotSequence();
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtSlot() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(IsDebugBreakSlot());
+  // Patch the code changing the debug break slot code from:
+  //   nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  // to a call to the debug break slot code.
+  //   li t9, address   (lui t9 / ori t9 instruction pair)
+  //   call t9          (jalr t9 / nop instruction pair)
+  CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+  patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
+      Isolate::Current()->debug()->debug_break_slot()->entry())));
+  patcher.masm()->Call(v8::internal::t9);
 }
 
 
 void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(IsDebugBreakSlot());
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kDebugBreakSlotInstructions);
 }
 
 
 #define __ ACCESS_MASM(masm)
 
 
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+                                          RegList object_regs,
+                                          RegList non_object_regs) {
+  __ EnterInternalFrame();
+
+  // Store the registers containing live values on the expression stack to
+  // make sure that these are correctly updated during GC. Non-object values
+  // are stored as smis, which the GC leaves untouched.
+  ASSERT((object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((object_regs & non_object_regs) == 0);
+  if ((object_regs | non_object_regs) != 0) {
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        if (FLAG_debug_code) {
+          __ And(at, reg, 0xc0000000);
+          __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+        }
+        __ sll(reg, reg, kSmiTagSize);
+      }
+    }
+    __ MultiPush(object_regs | non_object_regs);
+  }
+
+#ifdef DEBUG
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+  __ mov(a0, zero_reg);  // No arguments.
+  __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+  CEntryStub ceb(1);
+  __ CallStub(&ceb);
+
+  // Restore the register values from the expression stack.
+  if ((object_regs | non_object_regs) != 0) {
+    __ MultiPop(object_regs | non_object_regs);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ srl(reg, reg, kSmiTagSize);
+      }
+      if (FLAG_debug_code &&
+          (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+        __ li(reg, kDebugZapValue);
+      }
+    }
+  }
+
+  __ LeaveInternalFrame();
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which has been
+  // overwritten by the address of DebugBreakXXX.
+  __ li(t9, Operand(
+      ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate())));
+  __ lw(t9, MemOperand(t9));
+  __ Jump(t9);
+}
+
+
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for IC load (from ic-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  // Registers a0 and a2 contain objects that need to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a2.bit(), 0);
 }
 
 
 void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for IC store (from ic-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  // Registers a0, a1, and a2 contain objects that need to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
 }
 
 
 void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra  : return address
+  //  -- a0  : key
+  //  -- a1  : receiver
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit(), 0);
 }
 
 
 void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
 }
 
 
 void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for IC call (from ic-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a2: name
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a2.bit(), 0);
 }
 
 
 void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for construct call (from builtins-mips.cc).
+  //  -- a0     : number of arguments (not smi)
+  //  -- a1     : constructor function
+  Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
 }
 
 
 void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // In places other than IC call sites, v0 is expected to hold the top of
+  // stack, which is an object. This is not generally the case, so this
+  // helper should be used with care.
+  Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
 }
 
 
 void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, 0);
 }
 
 
 void Debug::GenerateSlot(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+  // the trampoline pool in the debug break slot code.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+  Label check_codesize;
+  __ bind(&check_codesize);
+  __ RecordDebugBreakSlot();
+  for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+    __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+  }
+  ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+            masm->InstructionsGeneratedSince(&check_codesize));
 }
 
 
 void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // In the places where a debug break slot is inserted, no register can
+  // contain object pointers.
+  Generate_DebugBreakCallHelper(masm, 0, 0);
 }
 
 
 void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  masm->Abort("LiveEdit frame dropping is not supported on mips");
 }
 
 
 void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  masm->Abort("LiveEdit frame dropping is not supported on mips");
 }
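
The slot machinery above depends on marker nops being distinguishable from real code. On MIPS a plain nop is sll zero_reg, zero_reg, 0, an all-zero instruction word, so sll zero_reg, zero_reg, marker differs only in the 5-bit shift-amount field at bits 10..6. A sketch of the encoding, with illustrative names:

#include <cstdint>

static const int kSaShift = 6;  // Shift-amount field: bits 10..6.

// sll with rs, rt, rd and the funct field all zero: only sa carries data.
static uint32_t MarkerNop(uint32_t marker) {
  return marker << kSaShift;
}

// DEBUG_BREAK_NOP is nop(1), per the comment in SetDebugBreakAtSlot above.
static bool IsDebugBreakNop(uint32_t instr_word) {
  return instr_word == MarkerNop(1);
}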
 
 
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index b7ceb2b..7df5c41 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,7 +33,7 @@
 //
 //   NameConverter converter;
 //   Disassembler d(converter);
-//   for (byte_* pc = begin; pc < end;) {
+//   for (byte* pc = begin; pc < end;) {
 //     v8::internal::EmbeddedVector<char, 256> buffer;
 //     byte* prev_pc = pc;
 //     pc += d.InstructionDecode(buffer, pc);
@@ -85,7 +85,7 @@
 
   // Writes one disassembled instruction into 'buffer' (0-terminated).
   // Returns the length of the disassembled machine instruction in bytes.
-  int InstructionDecode(byte_* instruction);
+  int InstructionDecode(byte* instruction);
 
  private:
   // Bottleneck functions to print into the out_buffer.
@@ -103,6 +103,8 @@
   void PrintFd(Instruction* instr);
   void PrintSa(Instruction* instr);
   void PrintSd(Instruction* instr);
+  void PrintSs1(Instruction* instr);
+  void PrintSs2(Instruction* instr);
   void PrintBc(Instruction* instr);
   void PrintCc(Instruction* instr);
   void PrintFunction(Instruction* instr);
@@ -212,13 +214,29 @@
 }
 
 
-// Print the integer value of the rd field, (when it is not used as reg).
+// Print the integer value of the rd field, when it is not used as a register.
 void Decoder::PrintSd(Instruction* instr) {
   int sd = instr->RdValue();
   out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
 }
 
 
+// Print the integer value of the rd field, when used as 'ext' size.
+void Decoder::PrintSs1(Instruction* instr) {
+  int ss = instr->RdValue();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+}
+
+
+// Print the integer value of the rd field, when used as 'ins' size.
+void Decoder::PrintSs2(Instruction* instr) {
+  int ss = instr->RdValue();
+  int pos = instr->SaValue();
+  out_buffer_pos_ +=
+      OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+}
+
+
 // Print the integer value of the cc field for the bc1t/f instructions.
 void Decoder::PrintBc(Instruction* instr) {
   int cc = instr->FBccValue();
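
PrintSs1 and PrintSs2 above recover operand sizes from the field encodings mips32r2 defines for ext and ins: ext stores size-1 (msbd) in the rd slot, while ins stores pos+size-1 (msb) in rd and pos (lsb) in sa. Stated directly as a sketch:

// What the two printers compute, given the raw instruction fields.
static int ExtSize(int rd_field) {
  return rd_field + 1;             // size = msbd + 1
}

static int InsSize(int rd_field, int sa_field) {
  return rd_field - sa_field + 1;  // size = msb - lsb + 1
}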
@@ -242,7 +260,7 @@
 
 // Print 16-bit signed immediate value.
 void Decoder::PrintSImm16(Instruction* instr) {
-  int32_t imm = ((instr->Imm16Value())<<16)>>16;
+  int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
   out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
 }
 
@@ -298,15 +316,15 @@
 // complexity of FormatOption.
 int Decoder::FormatRegister(Instruction* instr, const char* format) {
   ASSERT(format[0] == 'r');
-  if (format[1] == 's') {  // 'rs: Rs register
+  if (format[1] == 's') {  // 'rs: Rs register.
     int reg = instr->RsValue();
     PrintRegister(reg);
     return 2;
-  } else if (format[1] == 't') {  // 'rt: rt register
+  } else if (format[1] == 't') {  // 'rt: rt register.
     int reg = instr->RtValue();
     PrintRegister(reg);
     return 2;
-  } else if (format[1] == 'd') {  // 'rd: rd register
+  } else if (format[1] == 'd') {  // 'rd: rd register.
     int reg = instr->RdValue();
     PrintRegister(reg);
     return 2;
@@ -320,15 +338,15 @@
 // complexity of FormatOption.
 int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
   ASSERT(format[0] == 'f');
-  if (format[1] == 's') {  // 'fs: fs register
+  if (format[1] == 's') {  // 'fs: fs register.
     int reg = instr->FsValue();
     PrintFPURegister(reg);
     return 2;
-  } else if (format[1] == 't') {  // 'ft: ft register
+  } else if (format[1] == 't') {  // 'ft: ft register.
     int reg = instr->FtValue();
     PrintFPURegister(reg);
     return 2;
-  } else if (format[1] == 'd') {  // 'fd: fd register
+  } else if (format[1] == 'd') {  // 'fd: fd register.
     int reg = instr->FdValue();
     PrintFPURegister(reg);
     return 2;
@@ -345,12 +363,12 @@
 // characters that were consumed from the formatting string.
 int Decoder::FormatOption(Instruction* instr, const char* format) {
   switch (format[0]) {
-    case 'c': {   // 'code for break or trap instructions
+    case 'c': {   // 'code for break or trap instructions.
       ASSERT(STRING_STARTS_WITH(format, "code"));
       PrintCode(instr);
       return 4;
     }
-    case 'i': {   // 'imm16u or 'imm26
+    case 'i': {   // 'imm16u or 'imm26.
       if (format[3] == '1') {
         ASSERT(STRING_STARTS_WITH(format, "imm16"));
         if (format[5] == 's') {
@@ -370,13 +388,13 @@
         return 5;
       }
     }
-    case 'r': {   // 'r: registers
+    case 'r': {   // 'r: registers.
       return FormatRegister(instr, format);
     }
-    case 'f': {   // 'f: FPUregisters
+    case 'f': {   // 'f: FPUregisters.
       return FormatFPURegister(instr, format);
     }
-    case 's': {   // 'sa
+    case 's': {   // 'sa.
       switch (format[1]) {
         case 'a': {
           ASSERT(STRING_STARTS_WITH(format, "sa"));
@@ -388,6 +406,17 @@
           PrintSd(instr);
           return 2;
         }
+        case 's': {
+          if (format[2] == '1') {
+            ASSERT(STRING_STARTS_WITH(format, "ss1"));  // ext size.
+            PrintSs1(instr);
+            return 3;
+          } else {
+            ASSERT(STRING_STARTS_WITH(format, "ss2"));  // ins size.
+            PrintSs2(instr);
+            return 3;
+          }
+        }
       }
     }
     case 'b': {   // 'bc - Special for bc1 cc field.
@@ -432,29 +461,29 @@
 
 void Decoder::DecodeTypeRegister(Instruction* instr) {
   switch (instr->OpcodeFieldRaw()) {
-    case COP1:    // Coprocessor instructions
+    case COP1:    // Coprocessor instructions.
       switch (instr->RsFieldRaw()) {
         case BC1:   // bc1 handled in DecodeTypeImmediate.
           UNREACHABLE();
           break;
         case MFC1:
-          Format(instr, "mfc1   'rt, 'fs");
+          Format(instr, "mfc1    'rt, 'fs");
           break;
         case MFHC1:
-          Format(instr, "mfhc1  'rt, 'fs");
+          Format(instr, "mfhc1   'rt, 'fs");
           break;
         case MTC1:
-          Format(instr, "mtc1   'rt, 'fs");
+          Format(instr, "mtc1    'rt, 'fs");
           break;
         // These are called "fs" too, although they are not FPU registers.
         case CTC1:
-          Format(instr, "ctc1   'rt, 'fs");
+          Format(instr, "ctc1    'rt, 'fs");
           break;
         case CFC1:
-          Format(instr, "cfc1   'rt, 'fs");
+          Format(instr, "cfc1    'rt, 'fs");
           break;
         case MTHC1:
-          Format(instr, "mthc1  'rt, 'fs");
+          Format(instr, "mthc1   'rt, 'fs");
           break;
         case D:
           switch (instr->FunctionFieldRaw()) {
@@ -480,7 +509,7 @@
               Format(instr, "neg.d   'fd, 'fs");
               break;
             case SQRT_D:
-              Format(instr, "sqrt.d   'fd, 'fs");
+              Format(instr, "sqrt.d  'fd, 'fs");
               break;
             case CVT_W_D:
               Format(instr, "cvt.w.d 'fd, 'fs");
@@ -592,134 +621,134 @@
     case SPECIAL:
       switch (instr->FunctionFieldRaw()) {
         case JR:
-          Format(instr, "jr   'rs");
+          Format(instr, "jr      'rs");
           break;
         case JALR:
-          Format(instr, "jalr 'rs");
+          Format(instr, "jalr    'rs");
           break;
         case SLL:
           if ( 0x0 == static_cast<int>(instr->InstructionBits()))
             Format(instr, "nop");
           else
-            Format(instr, "sll  'rd, 'rt, 'sa");
+            Format(instr, "sll     'rd, 'rt, 'sa");
           break;
         case SRL:
           if (instr->RsValue() == 0) {
-            Format(instr, "srl  'rd, 'rt, 'sa");
+            Format(instr, "srl     'rd, 'rt, 'sa");
           } else {
             if (mips32r2) {
-              Format(instr, "rotr  'rd, 'rt, 'sa");
+              Format(instr, "rotr    'rd, 'rt, 'sa");
             } else {
               Unknown(instr);
             }
           }
           break;
         case SRA:
-          Format(instr, "sra  'rd, 'rt, 'sa");
+          Format(instr, "sra     'rd, 'rt, 'sa");
           break;
         case SLLV:
-          Format(instr, "sllv 'rd, 'rt, 'rs");
+          Format(instr, "sllv    'rd, 'rt, 'rs");
           break;
         case SRLV:
           if (instr->SaValue() == 0) {
-            Format(instr, "srlv 'rd, 'rt, 'rs");
+            Format(instr, "srlv    'rd, 'rt, 'rs");
           } else {
             if (mips32r2) {
-              Format(instr, "rotrv 'rd, 'rt, 'rs");
+              Format(instr, "rotrv   'rd, 'rt, 'rs");
             } else {
               Unknown(instr);
             }
           }
           break;
         case SRAV:
-          Format(instr, "srav 'rd, 'rt, 'rs");
+          Format(instr, "srav    'rd, 'rt, 'rs");
           break;
         case MFHI:
-          Format(instr, "mfhi 'rd");
+          Format(instr, "mfhi    'rd");
           break;
         case MFLO:
-          Format(instr, "mflo 'rd");
+          Format(instr, "mflo    'rd");
           break;
         case MULT:
-          Format(instr, "mult 'rs, 'rt");
+          Format(instr, "mult    'rs, 'rt");
           break;
         case MULTU:
-          Format(instr, "multu  'rs, 'rt");
+          Format(instr, "multu   'rs, 'rt");
           break;
         case DIV:
-          Format(instr, "div  'rs, 'rt");
+          Format(instr, "div     'rs, 'rt");
           break;
         case DIVU:
-          Format(instr, "divu 'rs, 'rt");
+          Format(instr, "divu    'rs, 'rt");
           break;
         case ADD:
-          Format(instr, "add  'rd, 'rs, 'rt");
+          Format(instr, "add     'rd, 'rs, 'rt");
           break;
         case ADDU:
-          Format(instr, "addu 'rd, 'rs, 'rt");
+          Format(instr, "addu    'rd, 'rs, 'rt");
           break;
         case SUB:
-          Format(instr, "sub  'rd, 'rs, 'rt");
+          Format(instr, "sub     'rd, 'rs, 'rt");
           break;
         case SUBU:
-          Format(instr, "sub  'rd, 'rs, 'rt");
+          Format(instr, "subu    'rd, 'rs, 'rt");
           break;
         case AND:
-          Format(instr, "and  'rd, 'rs, 'rt");
+          Format(instr, "and     'rd, 'rs, 'rt");
           break;
         case OR:
           if (0 == instr->RsValue()) {
-            Format(instr, "mov  'rd, 'rt");
+            Format(instr, "mov     'rd, 'rt");
           } else if (0 == instr->RtValue()) {
-            Format(instr, "mov  'rd, 'rs");
+            Format(instr, "mov     'rd, 'rs");
           } else {
-            Format(instr, "or   'rd, 'rs, 'rt");
+            Format(instr, "or      'rd, 'rs, 'rt");
           }
           break;
         case XOR:
-          Format(instr, "xor  'rd, 'rs, 'rt");
+          Format(instr, "xor     'rd, 'rs, 'rt");
           break;
         case NOR:
-          Format(instr, "nor  'rd, 'rs, 'rt");
+          Format(instr, "nor     'rd, 'rs, 'rt");
           break;
         case SLT:
-          Format(instr, "slt  'rd, 'rs, 'rt");
+          Format(instr, "slt     'rd, 'rs, 'rt");
           break;
         case SLTU:
-          Format(instr, "sltu 'rd, 'rs, 'rt");
+          Format(instr, "sltu    'rd, 'rs, 'rt");
           break;
         case BREAK:
           Format(instr, "break, code: 'code");
           break;
         case TGE:
-          Format(instr, "tge  'rs, 'rt, code: 'code");
+          Format(instr, "tge     'rs, 'rt, code: 'code");
           break;
         case TGEU:
-          Format(instr, "tgeu 'rs, 'rt, code: 'code");
+          Format(instr, "tgeu    'rs, 'rt, code: 'code");
           break;
         case TLT:
-          Format(instr, "tlt  'rs, 'rt, code: 'code");
+          Format(instr, "tlt     'rs, 'rt, code: 'code");
           break;
         case TLTU:
-          Format(instr, "tltu 'rs, 'rt, code: 'code");
+          Format(instr, "tltu    'rs, 'rt, code: 'code");
           break;
         case TEQ:
-          Format(instr, "teq  'rs, 'rt, code: 'code");
+          Format(instr, "teq     'rs, 'rt, code: 'code");
           break;
         case TNE:
-          Format(instr, "tne  'rs, 'rt, code: 'code");
+          Format(instr, "tne     'rs, 'rt, code: 'code");
           break;
         case MOVZ:
-          Format(instr, "movz 'rd, 'rs, 'rt");
+          Format(instr, "movz    'rd, 'rs, 'rt");
           break;
         case MOVN:
-          Format(instr, "movn 'rd, 'rs, 'rt");
+          Format(instr, "movn    'rd, 'rs, 'rt");
           break;
         case MOVCI:
           if (instr->Bit(16)) {
-            Format(instr, "movt 'rd, 'rs, 'Cc");
+            Format(instr, "movt    'rd, 'rs, 'bc");
           } else {
-            Format(instr, "movf 'rd, 'rs, 'Cc");
+            Format(instr, "movf    'rd, 'rs, 'bc");
           }
           break;
         default:
@@ -729,10 +758,10 @@
     case SPECIAL2:
       switch (instr->FunctionFieldRaw()) {
         case MUL:
-          Format(instr, "mul  'rd, 'rs, 'rt");
+          Format(instr, "mul     'rd, 'rs, 'rt");
           break;
         case CLZ:
-          Format(instr, "clz  'rd, 'rs");
+          Format(instr, "clz     'rd, 'rs");
           break;
         default:
           UNREACHABLE();
@@ -742,7 +771,7 @@
       switch (instr->FunctionFieldRaw()) {
         case INS: {
           if (mips32r2) {
-            Format(instr, "ins  'rt, 'rs, 'sd, 'sa");
+            Format(instr, "ins     'rt, 'rs, 'sa, 'ss2");
           } else {
             Unknown(instr);
           }
@@ -750,7 +779,7 @@
         }
         case EXT: {
           if (mips32r2) {
-            Format(instr, "ext  'rt, 'rs, 'sd, 'sa");
+            Format(instr, "ext     'rt, 'rs, 'sa, 'ss1");
           } else {
             Unknown(instr);
           }
@@ -785,16 +814,16 @@
     case REGIMM:
       switch (instr->RtFieldRaw()) {
         case BLTZ:
-          Format(instr, "bltz 'rs, 'imm16u");
+          Format(instr, "bltz    'rs, 'imm16u");
           break;
         case BLTZAL:
-          Format(instr, "bltzal 'rs, 'imm16u");
+          Format(instr, "bltzal  'rs, 'imm16u");
           break;
         case BGEZ:
-          Format(instr, "bgez 'rs, 'imm16u");
+          Format(instr, "bgez    'rs, 'imm16u");
           break;
         case BGEZAL:
-          Format(instr, "bgezal 'rs, 'imm16u");
+          Format(instr, "bgezal  'rs, 'imm16u");
           break;
         default:
           UNREACHABLE();
@@ -802,90 +831,90 @@
     break;  // Case REGIMM.
     // ------------- Branch instructions.
     case BEQ:
-      Format(instr, "beq  'rs, 'rt, 'imm16u");
+      Format(instr, "beq     'rs, 'rt, 'imm16u");
       break;
     case BNE:
-      Format(instr, "bne  'rs, 'rt, 'imm16u");
+      Format(instr, "bne     'rs, 'rt, 'imm16u");
       break;
     case BLEZ:
-      Format(instr, "blez 'rs, 'imm16u");
+      Format(instr, "blez    'rs, 'imm16u");
       break;
     case BGTZ:
-      Format(instr, "bgtz 'rs, 'imm16u");
+      Format(instr, "bgtz    'rs, 'imm16u");
       break;
     // ------------- Arithmetic instructions.
     case ADDI:
-      Format(instr, "addi   'rt, 'rs, 'imm16s");
+      Format(instr, "addi    'rt, 'rs, 'imm16s");
       break;
     case ADDIU:
-      Format(instr, "addiu  'rt, 'rs, 'imm16s");
+      Format(instr, "addiu   'rt, 'rs, 'imm16s");
       break;
     case SLTI:
-      Format(instr, "slti   'rt, 'rs, 'imm16s");
+      Format(instr, "slti    'rt, 'rs, 'imm16s");
       break;
     case SLTIU:
-      Format(instr, "sltiu  'rt, 'rs, 'imm16u");
+      Format(instr, "sltiu   'rt, 'rs, 'imm16u");
       break;
     case ANDI:
-      Format(instr, "andi   'rt, 'rs, 'imm16x");
+      Format(instr, "andi    'rt, 'rs, 'imm16x");
       break;
     case ORI:
-      Format(instr, "ori    'rt, 'rs, 'imm16x");
+      Format(instr, "ori     'rt, 'rs, 'imm16x");
       break;
     case XORI:
-      Format(instr, "xori   'rt, 'rs, 'imm16x");
+      Format(instr, "xori    'rt, 'rs, 'imm16x");
       break;
     case LUI:
-      Format(instr, "lui    'rt, 'imm16x");
+      Format(instr, "lui     'rt, 'imm16x");
       break;
     // ------------- Memory instructions.
     case LB:
-      Format(instr, "lb     'rt, 'imm16s('rs)");
+      Format(instr, "lb      'rt, 'imm16s('rs)");
       break;
     case LH:
-      Format(instr, "lh     'rt, 'imm16s('rs)");
+      Format(instr, "lh      'rt, 'imm16s('rs)");
       break;
     case LWL:
-      Format(instr, "lwl    'rt, 'imm16s('rs)");
+      Format(instr, "lwl     'rt, 'imm16s('rs)");
       break;
     case LW:
-      Format(instr, "lw     'rt, 'imm16s('rs)");
+      Format(instr, "lw      'rt, 'imm16s('rs)");
       break;
     case LBU:
-      Format(instr, "lbu    'rt, 'imm16s('rs)");
+      Format(instr, "lbu     'rt, 'imm16s('rs)");
       break;
     case LHU:
-      Format(instr, "lhu    'rt, 'imm16s('rs)");
+      Format(instr, "lhu     'rt, 'imm16s('rs)");
       break;
     case LWR:
-      Format(instr, "lwr    'rt, 'imm16s('rs)");
+      Format(instr, "lwr     'rt, 'imm16s('rs)");
       break;
     case SB:
-      Format(instr, "sb     'rt, 'imm16s('rs)");
+      Format(instr, "sb      'rt, 'imm16s('rs)");
       break;
     case SH:
-      Format(instr, "sh     'rt, 'imm16s('rs)");
+      Format(instr, "sh      'rt, 'imm16s('rs)");
       break;
     case SWL:
-      Format(instr, "swl    'rt, 'imm16s('rs)");
+      Format(instr, "swl     'rt, 'imm16s('rs)");
       break;
     case SW:
-      Format(instr, "sw     'rt, 'imm16s('rs)");
+      Format(instr, "sw      'rt, 'imm16s('rs)");
       break;
     case SWR:
-      Format(instr, "swr    'rt, 'imm16s('rs)");
+      Format(instr, "swr     'rt, 'imm16s('rs)");
       break;
     case LWC1:
-      Format(instr, "lwc1   'ft, 'imm16s('rs)");
+      Format(instr, "lwc1    'ft, 'imm16s('rs)");
       break;
     case LDC1:
-      Format(instr, "ldc1   'ft, 'imm16s('rs)");
+      Format(instr, "ldc1    'ft, 'imm16s('rs)");
       break;
     case SWC1:
-      Format(instr, "swc1   'ft, 'imm16s('rs)");
+      Format(instr, "swc1    'ft, 'imm16s('rs)");
       break;
     case SDC1:
-      Format(instr, "sdc1   'ft, 'imm16s('rs)");
+      Format(instr, "sdc1    'ft, 'imm16s('rs)");
       break;
     default:
       UNREACHABLE();
@@ -897,10 +926,10 @@
 void Decoder::DecodeTypeJump(Instruction* instr) {
   switch (instr->OpcodeFieldRaw()) {
     case J:
-      Format(instr, "j    'imm26");
+      Format(instr, "j       'imm26");
       break;
     case JAL:
-      Format(instr, "jal  'imm26");
+      Format(instr, "jal     'imm26");
       break;
     default:
       UNREACHABLE();
@@ -909,7 +938,7 @@
 
 
 // Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte_* instr_ptr) {
+int Decoder::InstructionDecode(byte* instr_ptr) {
   Instruction* instr = Instruction::At(instr_ptr);
   // Print raw instruction bytes.
   out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -944,15 +973,13 @@
 
 namespace disasm {
 
-using v8::internal::byte_;
-
-const char* NameConverter::NameOfAddress(byte_* addr) const {
+const char* NameConverter::NameOfAddress(byte* addr) const {
   v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
   return tmp_buffer_.start();
 }
 
 
-const char* NameConverter::NameOfConstant(byte_* addr) const {
+const char* NameConverter::NameOfConstant(byte* addr) const {
   return NameOfAddress(addr);
 }
 
@@ -968,12 +995,12 @@
 
 
 const char* NameConverter::NameOfByteCPURegister(int reg) const {
-  UNREACHABLE();  // MIPS does not have the concept of a byte register
+  UNREACHABLE();  // MIPS does not have the concept of a byte register.
   return "nobytereg";
 }
 
 
-const char* NameConverter::NameInCode(byte_* addr) const {
+const char* NameConverter::NameInCode(byte* addr) const {
   // The default name converter is called for unknown code. So we will not try
   // to access any memory.
   return "";
@@ -990,25 +1017,25 @@
 
 
 int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
-                                    byte_* instruction) {
+                                    byte* instruction) {
   v8::internal::Decoder d(converter_, buffer);
   return d.InstructionDecode(instruction);
 }
 
 
 // The MIPS assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
   return -1;
 }
 
 
-void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
   NameConverter converter;
   Disassembler d(converter);
-  for (byte_* pc = begin; pc < end;) {
+  for (byte* pc = begin; pc < end;) {
     v8::internal::EmbeddedVector<char, 128> buffer;
     buffer[0] = '\0';
-    byte_* prev_pc = pc;
+    byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
     fprintf(f, "%p    %08x      %s\n",
             prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index e2e0c91..faaa0e0 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -38,8 +38,7 @@
 
 
 Address ExitFrame::ComputeStackPointer(Address fp) {
-  UNIMPLEMENTED_MIPS();
-  return fp;
+  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
 }
 
 
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index f507590..2e720fb 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -59,7 +59,7 @@
   // Saved temporaries.
   1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
   1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
-  // gp, sp, fp
+  // gp, sp, fp.
   1 << 28 | 1 << 29 | 1 << 30;
 
 static const int kNumCalleeSaved = 11;
@@ -79,6 +79,43 @@
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
+static const int kUndefIndex = -1;
+// Maps register codes to the stack indexes of the saved registers.
+static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+  kUndefIndex,
+  kUndefIndex,
+  0,  // v0
+  kUndefIndex,
+  1,  // a0
+  2,  // a1
+  3,  // a2
+  4,  // a3
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  5,  // Saved temporaries.
+  6,
+  7,
+  8,
+  9,
+  10,
+  11,
+  12,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  13,  // gp
+  14,  // sp
+  15,  // fp
+  kUndefIndex
+};
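+// For example, a1 (register code 5) lives at stack index 2 in the
+// safepoint register block; registers that are never saved map to
+// kUndefIndex.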
+
 
 // ----------------------------------------------------
 
@@ -101,22 +138,24 @@
 
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kDebugMarkOffset = -1 * kPointerSize;
-  // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
-  static const int kCodeOffset = -1 * kPointerSize;
-  static const int kSPOffset = -1 * kPointerSize;
+  // See some explanation in MacroAssembler::EnterExitFrame.
+  // This marks the top of the extra allocated stack space.
+  static const int kStackSpaceOffset = -3 * kPointerSize;
 
-  // TODO(mips): Use a patched sp value on the stack instead.
-  // A marker of 0 indicates that double registers are saved.
-  static const int kMarkerOffset = -2 * kPointerSize;
+  static const int kCodeOffset = -2 * kPointerSize;
+
+  static const int kSPOffset = -1 * kPointerSize;
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = +0 * kPointerSize;
   // The calling JS function is between FP and PC.
   static const int kCallerPCOffset = +1 * kPointerSize;
 
+  // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
+  static const int kCallerSPOffset = +2 * kPointerSize;
+
   // FP-relative displacement of the caller's SP.
-  static const int kCallerSPDisplacement = +3 * kPointerSize;
+  static const int kCallerSPDisplacement = +2 * kPointerSize;
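+
+  // A sketch of the resulting fp-relative exit frame layout, derived from
+  // the offsets above:
+  //   fp + 2 * kPointerSize : caller's sp
+  //   fp + 1 * kPointerSize : caller's pc
+  //   fp + 0 * kPointerSize : caller's fp
+  //   fp - 1 * kPointerSize : saved sp
+  //   fp - 2 * kPointerSize : code object
+  //   fp - 3 * kPointerSize : top of the extra allocated stack space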
 };
 
 
@@ -135,7 +174,8 @@
   static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
 
   // C/C++ argument slots size.
-  static const int kCArgsSlotsSize = 4 * kPointerSize;
+  static const int kCArgSlotCount = 4;
+  static const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;
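+  // (Per the MIPS o32 ABI, callers reserve home slots for the four
+  // argument registers a0..a3, hence kCArgSlotCount is 4.)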
   // JS argument slots size.
   static const int kJSArgsSlotsSize = 0 * kPointerSize;
   // Assembly builtins argument slots size.
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 87507ff..9c93c63 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -38,7 +38,7 @@
 // next call: mov(a0, v0). This is not needed on the other architectures.
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -53,6 +53,73 @@
 
 #define __ ACCESS_MASM(masm_)
 
+
+static unsigned GetPropertyId(Property* property) {
+  if (property->is_synthetic()) return AstNode::kNoNumber;
+  return property->id();
+}
+
+
+// A patch site is a location in the code that can be patched. This class
+// has a number of methods to emit the patchable code and the method
+// EmitPatchInfo to record a marker back to the patchable code. The marker
+// is an andi at, rx, #yyy instruction, where x * 0x0000ffff + yyy (the raw
+// 16-bit immediate value) is the delta from the pc to the first
+// instruction of the patchable code.
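+// For example (hypothetical values, for illustration only): a marker of
+// andi at, a2, 0x14 encodes a delta of 6 * 0x0000ffff + 0x14 instructions,
+// 6 being a2's register code.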
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    ASSERT(patch_site_.is_bound() == info_emitted_);
+  }
+
+  // When initially emitting this, ensure that a jump is always generated to
+  // skip the inlined smi code.
+  void EmitJumpIfNotSmi(Register reg, Label* target) {
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ bind(&patch_site_);
+    __ andi(at, reg, 0);
+    // Always taken before patched.
+    __ Branch(target, eq, at, Operand(zero_reg));
+  }
+
+  // When initially emitting this, ensure that a jump is never generated to
+  // skip the inlined smi code.
+  void EmitJumpIfSmi(Register reg, Label* target) {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    __ bind(&patch_site_);
+    __ andi(at, reg, 0);
+    // Never taken before patched.
+    __ Branch(target, ne, at, Operand(zero_reg));
+  }
+
+  void EmitPatchInfo() {
+    int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+    Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
+    __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+#ifdef DEBUG
+    info_emitted_ = true;
+#endif
+  }
+
+  bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
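+
+// Typical use (a sketch; see VisitSwitchStatement below for an actual
+// instance):
+//   JumpPatchSite patch_site(masm_);
+//   patch_site.EmitJumpIfNotSmi(a2, &slow_case);  // Inlined smi path.
+//   ...
+//   EmitCallIC(ic, &patch_site, clause->CompareId());  // Emits patch info.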
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right.  The actual
 // argument count matches the formal parameter count expected by the
@@ -68,189 +135,510 @@
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-mips.h for its layout.
 void FullCodeGenerator::Generate(CompilationInfo* info) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());
+  Comment cmnt(masm_, "[ function compiled by full code generator");
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop-at");
+  }
+#endif
+
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). t1 is zero for method calls and non-zero for function
+  // calls.
+  if (info->is_strict_mode()) {
+    Label ok;
+    __ Branch(&ok, eq, t1, Operand(zero_reg));
+    int receiver_offset = scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ sw(a2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
+  int locals_count = scope()->num_stack_slots();
+
+  __ Push(ra, fp, cp, a1);
+  if (locals_count > 0) {
+    // Load undefined value here, so the value is ready for the loop
+    // below.
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  }
+  // Adjust fp to point to caller's fp.
+  __ Addu(fp, sp, Operand(2 * kPointerSize));
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    for (int i = 0; i < locals_count; i++) {
+      __ push(at);
+    }
+  }
+
+  bool function_in_register = true;
+
+  // Possibly allocate a local context.
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment cmnt(masm_, "[ Allocate local context");
+    // Argument to NewContext is the function, which is in a1.
+    __ push(a1);
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(heap_slots);
+      __ CallStub(&stub);
+    } else {
+      __ CallRuntime(Runtime::kNewContext, 1);
+    }
+    function_in_register = false;
+    // Context is returned in both v0 and cp.  It replaces the context
+    // passed to us.  It's saved in the stack and kept live in cp.
+    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Slot* slot = scope()->parameter(i)->AsSlot();
+      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                                 (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ lw(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        __ li(a1, Operand(Context::SlotOffset(slot->index())));
+        __ addu(a2, cp, a1);
+        __ sw(a0, MemOperand(a2, 0));
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use two more registers to avoid
+        // clobbering cp.
+        __ mov(a2, cp);
+        __ RecordWrite(a2, a1, a3);
+      }
+    }
+  }
+
+  Variable* arguments = scope()->arguments();
+  if (arguments != NULL) {
+    // Function uses arguments object.
+    Comment cmnt(masm_, "[ Allocate arguments object");
+    if (!function_in_register) {
+      // Load this again, if it's used by the local context below.
+      __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    } else {
+      __ mov(a3, a1);
+    }
+    // Receiver is just before the parameters on the caller's stack.
+    int offset = scope()->num_parameters() * kPointerSize;
+    __ Addu(a2, fp,
+            Operand(StandardFrameConstants::kCallerSPOffset + offset));
+    __ li(a1, Operand(Smi::FromInt(scope()->num_parameters())));
+    __ Push(a3, a2, a1);
+
+    // Arguments to ArgumentsAccessStub:
+    //   function, receiver address, parameter count.
+    // The stub will rewrite receiver and parameter count if the previous
+    // stack frame was an arguments adapter frame.
+    ArgumentsAccessStub stub(
+        is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
+                         : ArgumentsAccessStub::NEW_NON_STRICT);
+    __ CallStub(&stub);
+
+    Variable* arguments_shadow = scope()->arguments_shadow();
+    if (arguments_shadow != NULL) {
+      // Duplicate the value; move-to-slot operation might clobber registers.
+      __ mov(a3, v0);
+      Move(arguments_shadow->AsSlot(), a3, a1, a2);
+    }
+    Move(arguments->AsSlot(), v0, a1, a2);
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
+  // Visit the declarations and body unless there is an illegal
+  // redeclaration.
+  if (scope()->HasIllegalRedeclaration()) {
+    Comment cmnt(masm_, "[ Declarations");
+    scope()->VisitIllegalRedeclaration(this);
+
+  } else {
+    { Comment cmnt(masm_, "[ Declarations");
+      // For named function expressions, declare the function name as a
+      // constant.
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+      }
+      VisitDeclarations(scope()->declarations());
+    }
+
+    { Comment cmnt(masm_, "[ Stack check");
+      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+      Label ok;
+      __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+      __ Branch(&ok, hs, sp, Operand(t0));
+      StackCheckStub stub;
+      __ CallStub(&stub);
+      __ bind(&ok);
+    }
+
+    { Comment cmnt(masm_, "[ Body");
+      ASSERT(loop_depth() == 0);
+      VisitStatements(function()->body());
+      ASSERT(loop_depth() == 0);
+    }
+  }
+
+  // Always emit a 'return undefined' in case control fell off the end of
+  // the body.
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  }
+  EmitReturnSequence();
 }
 
 
 void FullCodeGenerator::ClearAccumulator() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(Smi::FromInt(0) == 0);
+  __ mov(v0, zero_reg);
 }
 
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Stack check");
+  Label ok;
+  __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(t0));
+  StackCheckStub stub;
+  // Record a mapping of this PC offset to the OSR id.  This is used to find
+  // the AST id from the unoptimized code in order to use it as a key into
+  // the deoptimization input data found in the optimized code.
+  RecordStackCheck(stmt->OsrEntryId());
+
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  // Record a mapping of the OSR id to this PC.  This is used if the OSR
+  // entry becomes the target of a bailout.  We don't expect it to be, but
+  // we want it to work if it is.
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
 
 void FullCodeGenerator::EmitReturnSequence() {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Return sequence");
+  if (return_label_.is_bound()) {
+    __ Branch(&return_label_);
+  } else {
+    __ bind(&return_label_);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in v0.
+      __ push(v0);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
+
+#ifdef DEBUG
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+#endif
+    // Make sure that the trampoline pool is not emitted inside of the
+    // return sequence.
+    { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+      // Here we use masm_-> instead of the __ macro to prevent the code
+      // coverage tool from instrumenting this sequence, as we rely on the
+      // code size here.
+      int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+      CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+      __ RecordJSReturn();
+      masm_->mov(sp, fp);
+      masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
+      masm_->Addu(sp, sp, Operand(sp_delta));
+      masm_->Jump(ra);
+    }
+
+#ifdef DEBUG
+    // Check that the size of the code used for returning is large enough
+    // for the debugger's requirements.
+    ASSERT(Assembler::kJSReturnSequenceInstructions <=
+           masm_->InstructionsGeneratedSince(&check_exit_codesize));
+#endif
+  }
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->Move(result_register(), slot);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->Move(result_register(), slot);
+  __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+  // For simplicity we always test the accumulator register.
+  codegen()->Move(result_register(), slot);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
+  __ LoadRoot(result_register(), index);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(
     Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
+  __ LoadRoot(result_register(), index);
+  __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  if (index == Heap::kUndefinedValueRootIndex ||
+      index == Heap::kNullValueRootIndex ||
+      index == Heap::kFalseValueRootIndex) {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  } else if (index == Heap::kTrueValueRootIndex) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else {
+    __ LoadRoot(result_register(), index);
+    codegen()->DoTest(true_label_, false_label_, fall_through_);
+  }
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
+  __ li(result_register(), Operand(lit));
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
+  // Immediates cannot be pushed directly.
+  __ li(result_register(), Operand(lit));
+  __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else if (lit->IsString()) {
+    if (String::cast(*lit)->length() == 0) {
+      if (false_label_ != fall_through_) __ Branch(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ Branch(true_label_);
+    }
+  } else if (lit->IsSmi()) {
+    if (Smi::cast(*lit)->value() == 0) {
+      if (false_label_ != fall_through_) __ Branch(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ Branch(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ li(result_register(), Operand(lit));
+    codegen()->DoTest(true_label_, false_label_, fall_through_);
+  }
 }
 
 
 void FullCodeGenerator::EffectContext::DropAndPlug(int count,
                                                    Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  __ Drop(count);
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
     int count,
     Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  __ Drop(count);
+  __ Move(result_register(), reg);
 }
 
 
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  if (count > 1) __ Drop(count - 1);
+  __ sw(reg, MemOperand(sp, 0));
 }
 
 
 void FullCodeGenerator::TestContext::DropAndPlug(int count,
                                                  Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  // For simplicity we always test the accumulator register.
+  __ Drop(count);
+  __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(materialize_true == materialize_false);
+  __ bind(materialize_true);
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+  __ Branch(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+  __ bind(&done);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(at, Heap::kTrueValueRootIndex);
+  __ push(at);
+  __ Branch(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(at, Heap::kFalseValueRootIndex);
+  __ push(at);
+  __ bind(&done);
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(result_register(), value_root_index);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(at, value_root_index);
+  __ push(at);
 }
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  if (flag) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  }
 }
 
 
 void FullCodeGenerator::DoTest(Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  UNIMPLEMENTED_MIPS();
+  if (CpuFeatures::IsSupported(FPU)) {
+    ToBooleanStub stub(result_register());
+    __ CallStub(&stub);
+    __ mov(at, zero_reg);
+  } else {
+    // Call the runtime to find the boolean value of the source and then
+    // translate it into control flow to the pair of labels.
+    __ push(result_register());
+    __ CallRuntime(Runtime::kToBool, 1);
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+  }
+  Split(ne, v0, Operand(at), if_true, if_false, fall_through);
 }
 
 
-// Original prototype for mips, needs arch-indep change. Leave out for now.
-// void FullCodeGenerator::Split(Condition cc,
-//                               Register lhs,
-//                               const Operand&  rhs,
-//                               Label* if_true,
-//                               Label* if_false,
-//                               Label* fall_through) {
 void FullCodeGenerator::Split(Condition cc,
+                              Register lhs,
+                              const Operand& rhs,
                               Label* if_true,
                               Label* if_false,
                               Label* fall_through) {
-  UNIMPLEMENTED_MIPS();
+  if (if_false == fall_through) {
+    __ Branch(if_true, cc, lhs, rhs);
+  } else if (if_true == fall_through) {
+    __ Branch(if_false, NegateCondition(cc), lhs, rhs);
+  } else {
+    __ Branch(if_true, cc, lhs, rhs);
+    __ Branch(if_false);
+  }
 }
 
 
 MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+    case Slot::LOCAL:
+      return MemOperand(fp, SlotOffset(slot));
+    case Slot::CONTEXT: {
+      int context_chain_length =
+          scope()->ContextChainLength(slot->var()->scope());
+      __ LoadContext(scratch, context_chain_length);
+      return ContextOperand(scratch, slot->index());
+    }
+    case Slot::LOOKUP:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return MemOperand(v0, 0);
 }
 
 
 void FullCodeGenerator::Move(Register destination, Slot* source) {
-  UNIMPLEMENTED_MIPS();
+  // Use destination as scratch.
+  MemOperand slot_operand = EmitSlotSearch(source, destination);
+  __ lw(destination, slot_operand);
 }
 
 
@@ -258,7 +646,25 @@
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
-  UNIMPLEMENTED_MIPS();
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  Label skip;
+  if (should_normalize) __ Branch(&skip);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
+  if (should_normalize) {
+    __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+    Split(eq, a0, Operand(t0), if_true, if_false, NULL);
+    __ bind(&skip);
+  }
 }
 
 
@@ -266,53 +672,532 @@
                              Register src,
                              Register scratch1,
                              Register scratch2) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(dst->type() != Slot::LOOKUP);  // Not yet implemented.
+  ASSERT(!scratch1.is(src) && !scratch2.is(src));
+  MemOperand location = EmitSlotSearch(dst, scratch1);
+  __ sw(src, location);
+  // Emit the write barrier code if the location is in the heap.
+  if (dst->type() == Slot::CONTEXT) {
+    __ RecordWrite(scratch1,
+                   Operand(Context::SlotOffset(dst->index())),
+                   scratch2,
+                   src);
+  }
 }
 
 
 void FullCodeGenerator::EmitDeclaration(Variable* variable,
                                         Variable::Mode mode,
                                         FunctionLiteral* function) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Declaration");
+  ASSERT(variable != NULL);  // Must have been resolved.
+  Slot* slot = variable->AsSlot();
+  Property* prop = variable->AsProperty();
+
+  if (slot != NULL) {
+    switch (slot->type()) {
+      case Slot::PARAMETER:
+      case Slot::LOCAL:
+        if (mode == Variable::CONST) {
+          __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+          __ sw(t0, MemOperand(fp, SlotOffset(slot)));
+        } else if (function != NULL) {
+          VisitForAccumulatorValue(function);
+          __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+        }
+        break;
+
+      case Slot::CONTEXT:
+        // We bypass the general EmitSlotSearch because we know more about
+        // this specific context.
+
+        // The variable in the decl always resides in the current function
+        // context.
+        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+        if (FLAG_debug_code) {
+          // Check that we're not inside a 'with'.
+          __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+          __ Check(eq, "Unexpected declaration in current context.",
+                   a1, Operand(cp));
+        }
+        if (mode == Variable::CONST) {
+          __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+          __ sw(at, ContextOperand(cp, slot->index()));
+          // No write barrier since the_hole_value is in old space.
+        } else if (function != NULL) {
+          VisitForAccumulatorValue(function);
+          __ sw(result_register(), ContextOperand(cp, slot->index()));
+          int offset = Context::SlotOffset(slot->index());
+          // We know that we have written a function, which is not a smi.
+          __ mov(a1, cp);
+          __ RecordWrite(a1, Operand(offset), a2, result_register());
+        }
+        break;
+
+      case Slot::LOOKUP: {
+        __ li(a2, Operand(variable->name()));
+        // Declaration nodes are always introduced in one of two modes.
+        ASSERT(mode == Variable::VAR ||
+               mode == Variable::CONST);
+        PropertyAttributes attr =
+            (mode == Variable::VAR) ? NONE : READ_ONLY;
+        __ li(a1, Operand(Smi::FromInt(attr)));
+        // Push initial value, if any.
+        // Note: For variables we must not push an initial value (such as
+        // 'undefined') because we may have a (legal) redeclaration and we
+        // must not destroy the current value.
+        if (mode == Variable::CONST) {
+          __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
+          __ Push(cp, a2, a1, a0);
+        } else if (function != NULL) {
+          __ Push(cp, a2, a1);
+          // Push initial value for function declaration.
+          VisitForStackValue(function);
+        } else {
+          ASSERT(Smi::FromInt(0) == 0);
+          // No initial value!
+          __ mov(a0, zero_reg);  // Smi::FromInt(0).
+          __ Push(cp, a2, a1, a0);
+        }
+        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+        break;
+      }
+    }
+
+  } else if (prop != NULL) {
+    // A const declaration aliasing a parameter is an illegal redeclaration.
+    ASSERT(mode != Variable::CONST);
+    if (function != NULL) {
+      // We are declaring a function that rewrites to a property.
+      // Use (keyed) IC to set the initial value.  We cannot visit the
+      // rewrite because it's shared and we risk recording duplicate AST
+      // IDs for bailouts from optimized code.
+      ASSERT(prop->obj()->AsVariableProxy() != NULL);
+      { AccumulatorValueContext for_object(this);
+        EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+      }
+
+      __ push(result_register());
+      VisitForAccumulatorValue(function);
+      __ mov(a0, result_register());
+      __ pop(a2);
+
+      ASSERT(prop->key()->AsLiteral() != NULL &&
+             prop->key()->AsLiteral()->handle()->IsSmi());
+      __ li(a1, Operand(prop->key()->AsLiteral()->handle()));
+
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+      // Value in v0 is ignored (declarations are statements).
+    }
+  }
 }
 
 
 void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
-  UNIMPLEMENTED_MIPS();
+  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
 }
 
 
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  UNIMPLEMENTED_MIPS();
+  // Call the runtime to declare the globals.
+  // The context is the first argument.
+  __ li(a2, Operand(pairs));
+  __ li(a1, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+  __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+  __ Push(cp, a2, a1, a0);
+  __ CallRuntime(Runtime::kDeclareGlobals, 4);
+  // Return value is ignored.
 }
 
 
 void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForStackValue(stmt->tag());
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
+
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as final fall through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForAccumulatorValue(clause->label());
+    __ mov(a0, result_register());  // CompareStub requires args in a0, a1.
+
+    // Perform the comparison as if via '==='.
+    __ lw(a1, MemOperand(sp, 0));  // Switch value.
+    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
+    if (inline_smi_code) {
+      Label slow_case;
+      __ or_(a2, a1, a0);
+      patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+
+      __ Branch(&next_test, ne, a1, Operand(a0));
+      __ Drop(1);  // Switch value is no longer needed.
+      __ Branch(clause->body_target());
+
+      __ bind(&slow_case);
+    }
+
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+    EmitCallIC(ic, &patch_site, clause->CompareId());
+
+    __ Branch(&next_test, ne, v0, Operand(zero_reg));
+    __ Drop(1);  // Switch value is no longer needed.
+    __ Branch(clause->body_target());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ Branch(nested_statement.break_target());
+  } else {
+    __ Branch(default_clause->body_target());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target());
+    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
 void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ ForInStatement");
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. Both SpiderMonkey and JSC
+  // ignore null and undefined in contrast to the specification; see
+  // ECMA-262 section 12.6.4.
+  VisitForAccumulatorValue(stmt->enumerable());
+  __ mov(a0, result_register());  // Result as param to InvokeBuiltin below.
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&exit, eq, a0, Operand(at));
+  Register null_value = t1;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ Branch(&exit, eq, a0, Operand(null_value));
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ JumpIfSmi(a0, &convert);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&done_convert, hs, a1, Operand(FIRST_JS_OBJECT_TYPE));
+  __ bind(&convert);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(a0, v0);
+  __ bind(&done_convert);
+  __ push(a0);
+
+  // Check cache validity in generated code. This is a fast case for
+  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+  // guarantee cache validity, call the runtime system to check cache
+  // validity or get the property names in a fixed array.
+  Label next, call_runtime;
+  // Preload a couple of values used in the loop.
+  Register empty_fixed_array_value = t2;
+  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Register empty_descriptor_array_value = t3;
+  __ LoadRoot(empty_descriptor_array_value,
+              Heap::kEmptyDescriptorArrayRootIndex);
+  __ mov(a1, a0);
+  __ bind(&next);
+
+  // Check that there are no elements.  Register a1 contains the
+  // current JS object we've reached through the prototype chain.
+  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ Branch(&call_runtime, ne, a2, Operand(empty_fixed_array_value));
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in a2 for the subsequent
+  // prototype load.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(a3, &call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (a3).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
+  __ JumpIfSmi(a3, &call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  __ Branch(&check_prototype, eq, a1, Operand(a0));
+  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  __ Branch(&call_runtime, ne, a3, Operand(empty_fixed_array_value));
+
+  // Load the prototype from the map and loop if non-null.
+  __ bind(&check_prototype);
+  __ lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
+  __ Branch(&next, ne, a1, Operand(null_value));
+
+  // The enum cache is valid.  Load the map of the object being
+  // iterated over and use the cache for the iteration.
+  Label use_cache;
+  __ lw(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ Branch(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(a0);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ mov(a2, v0);
+  __ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kMetaMapRootIndex);
+  __ Branch(&fixed_array, ne, a1, Operand(at));
+
+  // We got a map in register v0. Get the enumeration cache from it.
+  __ bind(&use_cache);
+  __ LoadInstanceDescriptors(v0, a1);
+  __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
+  __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ push(v0);  // Map.
+  __ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ li(a0, Operand(Smi::FromInt(0)));
+  // Push enumeration cache, enumeration cache length (as smi) and zero.
+  __ Push(a2, a1, a0);
+  __ jmp(&loop);
+
+  // We got a fixed array in register v0. Iterate through that.
+  __ bind(&fixed_array);
+  __ li(a1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ Push(a1, v0);
+  __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ li(a0, Operand(Smi::FromInt(0)));
+  __ Push(a1, a0);  // Fixed array length (as smi) and initial index.
+
+  // Generate code for doing the condition check.
+  __ bind(&loop);
+  // Load the current count to a0, load the length to a1.
+  __ lw(a0, MemOperand(sp, 0 * kPointerSize));
+  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(loop_statement.break_target(), hs, a0, Operand(a1));
+
+  // Get the current entry of the array into register a3.
+  __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+  __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(t0, a2, t0);  // Array base + scaled (smi) index.
+  __ lw(a3, MemOperand(t0));  // Current entry.
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case into register a2.
+  __ lw(a2, MemOperand(sp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  Label update_each;
+  __ lw(a1, MemOperand(sp, 4 * kPointerSize));
+  __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&update_each, eq, t0, Operand(a2));
+
+  // Convert the entry to a string or (smi) 0 if it isn't a property
+  // any more. If the property has been removed while iterating, we
+  // just skip it.
+  __ push(a1);  // Enumerable.
+  __ push(a3);  // Current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+  __ mov(a3, result_register());
+  __ Branch(loop_statement.continue_target(), eq, a3, Operand(zero_reg));
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register a3.
+  __ bind(&update_each);
+  __ mov(result_register(), a3);
+  // Perform the assignment as if via '='.
+  { EffectContext context(this);
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
+  }
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing
+  // the index (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_target());
+  __ pop(a0);
+  __ Addu(a0, a0, Operand(Smi::FromInt(1)));
+  __ push(a0);
+
+  EmitStackCheck(stmt);
+  __ Branch(&loop);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_target());
+  __ Drop(5);
+
+  // Exit and decrement the loop depth.
+  __ bind(&exit);
+  decrement_loop_depth();
 }
 
 
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
-  UNIMPLEMENTED_MIPS();
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning. If
+  // we're running with the --always-opt or the --prepare-always-opt
+  // flag, we need to use the runtime function so that the new function
+  // we are creating here gets a chance to have its code optimized and
+  // doesn't just get a copy of the existing unoptimized code.
+  if (!FLAG_always_opt &&
+      !FLAG_prepare_always_opt &&
+      !pretenure &&
+      scope()->is_function_scope() &&
+      info->num_literals() == 0) {
+    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    __ li(a0, Operand(info));
+    __ push(a0);
+    __ CallStub(&stub);
+  } else {
+    __ li(a0, Operand(info));
+    __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
+                              : Heap::kFalseValueRootIndex);
+    __ Push(cp, a0, a1);
+    __ CallRuntime(Runtime::kNewClosure, 3);
+  }
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ VariableProxy");
+  EmitVariableLoad(expr->var());
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
+    Slot* slot,
+    TypeofState typeof_state,
+    Label* slow) {
+  Register current = cp;
+  Register next = a1;
+  Register temp = a2;
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+        __ Branch(slow, ne, temp, Operand(zero_reg));
+      }
+      // Load next context in chain.
+      __ lw(next, ContextOperand(current, Context::CLOSURE_INDEX));
+      __ lw(next, FieldMemOperand(next, JSFunction::kContextOffset));
+      // Walk the rest of the chain without clobbering cp.
+      current = next;
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s->is_eval_scope()) {
+    Label loop, fast;
+    if (!current.is(next)) {
+      __ Move(next, current);
+    }
+    __ bind(&loop);
+    // Terminate at global context.
+    __ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+    __ LoadRoot(t0, Heap::kGlobalContextMapRootIndex);
+    __ Branch(&fast, eq, temp, Operand(t0));
+    // Check that extension is NULL.
+    __ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+    __ Branch(slow, ne, temp, Operand(zero_reg));
+    // Load next context in chain.
+    __ lw(next, ContextOperand(next, Context::CLOSURE_INDEX));
+    __ lw(next, FieldMemOperand(next, JSFunction::kContextOffset));
+    __ Branch(&loop);
+    __ bind(&fast);
+  }
+
+  __ lw(a0, GlobalObjectOperand());
+  __ li(a2, Operand(slot->var()->name()));
+  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+      ? RelocInfo::CODE_TARGET
+      : RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  EmitCallIC(ic, mode, AstNode::kNoNumber);
 }
 
 
 MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
     Slot* slot,
     Label* slow) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+  ASSERT(slot->type() == Slot::CONTEXT);
+  Register context = cp;
+  Register next = a3;
+  Register temp = t0;
+
+  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+        __ Branch(slow, ne, temp, Operand(zero_reg));
+      }
+      __ lw(next, ContextOperand(context, Context::CLOSURE_INDEX));
+      __ lw(next, FieldMemOperand(next, JSFunction::kContextOffset));
+      // Walk the rest of the chain without clobbering cp.
+      context = next;
+    }
+  }
+  // Check that last extension is NULL.
+  __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+  __ Branch(slow, ne, temp, Operand(zero_reg));
+
+  // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+  // destroy the cp register).
+  return ContextOperand(context, slot->index());
 }
 
 
@@ -321,336 +1206,2841 @@
     TypeofState typeof_state,
     Label* slow,
     Label* done) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow) {
-  UNIMPLEMENTED_MIPS();
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+    EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
+    __ Branch(done);
+  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
+    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+    if (potential_slot != NULL) {
+      // Generate fast case for locals that rewrite to slots.
+      __ lw(v0, ContextSlotOperandCheckExtensions(potential_slot, slow));
+      if (potential_slot->var()->mode() == Variable::CONST) {
+        __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+        __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
+        __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
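+        // movz moves a0 (undefined) into v0 only when at is zero, i.e. when
+        // the loaded value was the hole.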
+        __ movz(v0, a0, at);  // Conditional move.
+      }
+      __ Branch(done);
+    } else if (rewrite != NULL) {
+      // Generate fast case for calls of an argument function.
+      Property* property = rewrite->AsProperty();
+      if (property != NULL) {
+        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+        Literal* key_literal = property->key()->AsLiteral();
+        if (obj_proxy != NULL &&
+            key_literal != NULL &&
+            obj_proxy->IsArguments() &&
+            key_literal->handle()->IsSmi()) {
+          // Load arguments object if there are no eval-introduced
+          // variables. Then load the argument from the arguments
+          // object using keyed load.
+          __ lw(a1,
+                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
+                                                   slow));
+          __ li(a0, Operand(key_literal->handle()));
+          Handle<Code> ic =
+              isolate()->builtins()->KeyedLoadIC_Initialize();
+          EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+          __ Branch(done);
+        }
+      }
+    }
+  }
 }
 
 
 void FullCodeGenerator::EmitVariableLoad(Variable* var) {
-  UNIMPLEMENTED_MIPS();
+  // Four cases: non-this global variables, lookup slots, all other
+  // types of slots, and parameters that rewrite to explicit property
+  // accesses on the arguments object.
+  Slot* slot = var->AsSlot();
+  Property* property = var->AsProperty();
+
+  if (var->is_global() && !var->is_this()) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. Variable name is passed in a2 and the global
+    // object (receiver) in a0.
+    __ lw(a0, GlobalObjectOperand());
+    __ li(a2, Operand(var->name()));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+    context()->Plug(v0);
+
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    Label done, slow;
+
+    // Generate code for loading from variables potentially shadowed
+    // by eval-introduced variables.
+    EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
+
+    __ bind(&slow);
+    Comment cmnt(masm_, "Lookup slot");
+    __ li(a1, Operand(var->name()));
+    __ Push(cp, a1);  // Context and name.
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ bind(&done);
+
+    context()->Plug(v0);
+
+  } else if (slot != NULL) {
+    Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+                            ? "Context slot"
+                            : "Stack slot");
+    if (var->mode() == Variable::CONST) {
+      // Constants may be the hole value if they have not been initialized.
+      // Unhole them.
+      MemOperand slot_operand = EmitSlotSearch(slot, a0);
+      __ lw(v0, slot_operand);
+      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+      __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
+      __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+      __ movz(v0, a0, at);  // Conditional move.
+      context()->Plug(v0);
+    } else {
+      context()->Plug(slot);
+    }
+  } else {
+    Comment cmnt(masm_, "Rewritten parameter");
+    ASSERT_NOT_NULL(property);
+    // Rewritten parameter accesses are of the form "slot[literal]".
+    // Assert that the object is in a slot.
+    Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+    ASSERT_NOT_NULL(object_var);
+    Slot* object_slot = object_var->AsSlot();
+    ASSERT_NOT_NULL(object_slot);
+
+    // Load the object.
+    Move(a1, object_slot);
+
+    // Assert that the key is a smi.
+    Literal* key_literal = property->key()->AsLiteral();
+    ASSERT_NOT_NULL(key_literal);
+    ASSERT(key_literal->handle()->IsSmi());
+
+    // Load the key.
+    __ li(a0, Operand(key_literal->handle()));
+
+    // Call keyed load IC. It has arguments key and receiver in a0 and a1.
+    Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ RegExpLiteral");
+  Label materialized;
+  // Registers will be used as follows:
+  // t1 = materialized value (RegExp literal)
+  // t0 = JS function, literals array
+  // a3 = literal index
+  // a2 = RegExp pattern
+  // a1 = RegExp flags
+  // a0 = RegExp literal clone
+  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ lw(t1, FieldMemOperand(t0, literal_offset));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&materialized, ne, t1, Operand(at));
+
+  // Create regexp literal using runtime function.
+  // Result will be in v0.
+  __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a2, Operand(expr->pattern()));
+  __ li(a1, Operand(expr->flags()));
+  __ Push(t0, a3, a2, a1);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ mov(t1, v0);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ push(t1);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ push(a0);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+  __ pop(t1);
+
+  __ bind(&allocated);
+
+  // After this, registers are used as follows:
+  // v0: Newly allocated regexp.
+  // t1: Materialized regexp.
+  // a2: temp.
+  __ CopyFields(v0, t1, a2.bit(), size / kPointerSize);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ ObjectLiteral");
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a1, Operand(expr->constant_properties()));
+  int flags = expr->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  flags |= expr->has_function()
+      ? ObjectLiteral::kHasFunction
+      : ObjectLiteral::kNoFlags;
+  __ li(a0, Operand(Smi::FromInt(flags)));
+  __ Push(a3, a2, a1, a0);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+  } else {
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  }
+
+  // If result_saved is true the result is on top of the stack.  If
+  // result_saved is false the result is in v0.
+  bool result_saved = false;
+
+  // Mark all computed expressions that are bound to a key that
+  // is shadowed by a later occurrence of the same key. For the
+  // marked expressions, no store code is emitted.
+  expr->CalculateEmitStore();
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(v0);  // Save result on stack.
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+        // Fall through.
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            __ mov(a0, result_register());
+            __ li(a2, Operand(key->handle()));
+            __ lw(a1, MemOperand(sp));
+            Handle<Code> ic = is_strict_mode()
+                ? isolate()->builtins()->StoreIC_Initialize_Strict()
+                : isolate()->builtins()->StoreIC_Initialize();
+            EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
+          }
+          break;
+        }
+        // Fall through.
+      case ObjectLiteral::Property::PROTOTYPE:
+        // Duplicate receiver on stack.
+        __ lw(a0, MemOperand(sp));
+        __ push(a0);
+        VisitForStackValue(key);
+        VisitForStackValue(value);
+        if (property->emit_store()) {
+          __ li(a0, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
+          __ push(a0);
+          __ CallRuntime(Runtime::kSetProperty, 4);
+        } else {
+          __ Drop(3);
+        }
+        break;
+      case ObjectLiteral::Property::GETTER:
+      case ObjectLiteral::Property::SETTER:
+        // Duplicate receiver on stack.
+        __ lw(a0, MemOperand(sp));
+        __ push(a0);
+        VisitForStackValue(key);
+        __ li(a1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+                           Smi::FromInt(1) :
+                           Smi::FromInt(0)));
+        __ push(a1);
+        VisitForStackValue(value);
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        break;
+    }
+  }
+
+  if (expr->has_function()) {
+    ASSERT(result_saved);
+    __ lw(a0, MemOperand(sp));
+    __ push(a0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+  __ mov(a0, result_register());
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a1, Operand(expr->constant_elements()));
+  __ Push(a3, a2, a1);
+  if (expr->constant_elements()->map() ==
+      isolate()->heap()->fixed_cow_array_map()) {
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+    __ CallStub(&stub);
+    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
+        1, a1, a2);
+  } else if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    __ CallStub(&stub);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  for (int i = 0; i < length; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(v0);
+      result_saved = true;
+    }
+    VisitForAccumulatorValue(subexpr);
+
+    // Store the subexpression value in the array's elements.
+    __ lw(a1, MemOperand(sp));  // Copy of array literal.
+    __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ sw(result_register(), FieldMemOperand(a1, offset));
+
+    // Update the write barrier for the array store with v0 as the scratch
+    // register.
+    __ li(a2, Operand(offset));
+    // TODO(PJ): double check this RecordWrite call.
+    __ RecordWrite(a1, a2, result_register());
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Assignment");
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // on the left-hand side.
+  if (!expr->target()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->target());
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* property = expr->target()->AsProperty();
+  if (property != NULL) {
+    assign_type = (property->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the accumulator.
+        VisitForAccumulatorValue(property->obj());
+        __ push(result_register());
+      } else {
+        VisitForStackValue(property->obj());
+      }
+      break;
+    case KEYED_PROPERTY:
+      // We need the key and receiver on both the stack and in v0 and a1.
+      if (expr->is_compound()) {
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ lw(v0, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
+          __ push(v0);
+          __ li(v0, Operand(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForAccumulatorValue(property->key());
+        }
+        __ lw(a1, MemOperand(sp, 0));
+        __ push(v0);
+      } else {
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ lw(a1, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
+          __ li(v0, Operand(property->key()->AsLiteral()->handle()));
+          __ Push(a1, v0);
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForStackValue(property->key());
+        }
+      }
+      break;
+  }
+
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
+  if (expr->is_compound()) {
+    { AccumulatorValueContext context(this);
+      switch (assign_type) {
+        case VARIABLE:
+          EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
+          break;
+        case NAMED_PROPERTY:
+          EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+          break;
+        case KEYED_PROPERTY:
+          EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+          break;
+      }
+    }
+
+    Token::Value op = expr->binary_op();
+    __ push(v0);  // Left operand goes on the stack.
+    VisitForAccumulatorValue(expr->value());
+
+    OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+        ? OVERWRITE_RIGHT
+        : NO_OVERWRITE;
+    SetSourcePosition(expr->position() + 1);
+    AccumulatorValueContext context(this);
+    if (ShouldInlineSmiCase(op)) {
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
+                            op,
+                            mode,
+                            expr->target(),
+                            expr->value());
+    } else {
+      EmitBinaryOp(expr->binary_operation(), op, mode);
+    }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
+  } else {
+    VisitForAccumulatorValue(expr->value());
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      context()->Plug(v0);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
 }
 
 
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  UNIMPLEMENTED_MIPS();
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  __ mov(a0, result_register());
+  __ li(a2, Operand(key->handle()));
+  // Call load IC. It has arguments receiver and property name in a0 and a2.
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  UNIMPLEMENTED_MIPS();
+  SetSourcePosition(prop->position());
+  __ mov(a0, result_register());
+  // Call keyed load IC. It has arguments key and receiver in a0 and a1.
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
-                                              Expression* left,
-                                              Expression* right) {
-  UNIMPLEMENTED_MIPS();
+                                              Expression* left_expr,
+                                              Expression* right_expr) {
+  Label done, smi_case, stub_call;
+
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+
+  // Get the arguments.
+  Register left = a1;
+  Register right = a0;
+  __ pop(left);
+  __ mov(a0, result_register());
+
+  // Perform combined smi check on both operands.
+  __ Or(scratch1, left, Operand(right));
+  STATIC_ASSERT(kSmiTag == 0);
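+  // With a zero smi tag, or'ing the operands leaves the tag bit clear only
+  // when both are smis, so a single test covers both operands.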
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+  __ bind(&stub_call);
+  BinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+  __ jmp(&done);
+
+  __ bind(&smi_case);
+  // Smi case. This code works the same way as the smi-smi case in the type
+  // recording binary operation stub, see
+  // BinaryOpStub::GenerateSmiSmiOperation for comments.
+  switch (op) {
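+    // The shift cases below branch unconditionally to the stub call, so
+    // their inline smi code is currently unreachable.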
+    case Token::SAR:
+      __ Branch(&stub_call);
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ srav(right, left, scratch1);
+      __ And(v0, right, Operand(~kSmiTagMask));
+      break;
+    case Token::SHL: {
+      __ Branch(&stub_call);
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ sllv(scratch1, scratch1, scratch2);
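+      // A result fits in a smi iff it is in [-2^30, 2^30); adding 0x40000000
+      // leaves exactly those values non-negative, so a negative sum means
+      // overflow.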
+      __ Addu(scratch2, scratch1, Operand(0x40000000));
+      __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      break;
+    }
+    case Token::SHR: {
+      __ Branch(&stub_call);
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ srlv(scratch1, scratch1, scratch2);
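+      // The logical shift result can be retagged only if it is non-negative
+      // and fits in 31 bits, i.e. its top two bits are clear.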
+      __ And(scratch2, scratch1, 0xc0000000);
+      __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      break;
+    }
+    case Token::ADD:
+      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+      __ BranchOnOverflow(&stub_call, scratch1);
+      break;
+    case Token::SUB:
+      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+      __ BranchOnOverflow(&stub_call, scratch1);
+      break;
+    case Token::MUL: {
+      __ SmiUntag(scratch1, right);
+      __ Mult(left, scratch1);
+      __ mflo(scratch1);
+      __ mfhi(scratch2);
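+      // The 64-bit product fits in 32 bits iff hi equals the sign extension
+      // of lo; compare hi against lo shifted arithmetically right by 31.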
+      __ sra(scratch1, scratch1, 31);
+      __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
+      __ mflo(v0);
+      __ Branch(&done, ne, v0, Operand(zero_reg));
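+      // A zero product may really be -0: if left + right is negative, the
+      // non-zero operand was negative, so fall back to the stub.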
+      __ Addu(scratch2, right, left);
+      __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+      ASSERT(Smi::FromInt(0) == 0);
+      __ mov(v0, zero_reg);
+      break;
+    }
+    case Token::BIT_OR:
+      __ Or(v0, left, Operand(right));
+      break;
+    case Token::BIT_AND:
+      __ And(v0, left, Operand(right));
+      break;
+    case Token::BIT_XOR:
+      __ Xor(v0, left, Operand(right));
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
                                      OverwriteMode mode) {
-  UNIMPLEMENTED_MIPS();
+  __ mov(a0, result_register());
+  __ pop(a1);
+  BinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
-  UNIMPLEMENTED_MIPS();
+  // Invalid left-hand sides are rewritten to have a 'throw
+  // ReferenceError' on the left-hand side.
+  if (!expr->IsValidLeftHandSide()) {
+    VisitForEffect(expr);
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->AsProperty();
+  if (prop != NULL) {
+    assign_type = (prop->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EffectContext context(this);
+      EmitVariableAssignment(var, Token::ASSIGN);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(result_register());  // Preserve value.
+      VisitForAccumulatorValue(prop->obj());
+      __ mov(a1, result_register());
+      __ pop(a0);  // Restore value.
+      __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(result_register());  // Preserve value.
+      if (prop->is_synthetic()) {
+        ASSERT(prop->obj()->AsVariableProxy() != NULL);
+        ASSERT(prop->key()->AsLiteral() != NULL);
+        { AccumulatorValueContext for_object(this);
+          EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+        }
+        __ mov(a2, result_register());
+        __ li(a1, Operand(prop->key()->AsLiteral()->handle()));
+      } else {
+        VisitForStackValue(prop->obj());
+        VisitForAccumulatorValue(prop->key());
+        __ mov(a1, result_register());
+        __ pop(a2);
+      }
+      __ pop(a0);  // Restore value.
+      Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+        : isolate()->builtins()->KeyedStoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+      break;
+    }
+  }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
-  UNIMPLEMENTED_MIPS();
+  // Left-hand sides that rewrite to explicit property accesses do not reach
+  // here.
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->AsSlot() != NULL);
+
+  if (var->is_global()) {
+    ASSERT(!var->is_this());
+    // Assignment to a global variable.  Use inline caching for the
+    // assignment.  Right-hand-side value is passed in a0, variable name in
+    // a2, and the global object in a1.
+    __ mov(a0, result_register());
+    __ li(a2, Operand(var->name()));
+    __ lw(a1, GlobalObjectOperand());
+    Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+
+  } else if (op == Token::INIT_CONST) {
+    // Like var declarations, const declarations are hoisted to function
+    // scope.  However, unlike var initializers, const initializers are able
+    // to drill a hole to that function context, even from inside a 'with'
+    // context.  We thus bypass the normal static scope lookup.
+    Slot* slot = var->AsSlot();
+    Label skip;
+    switch (slot->type()) {
+      case Slot::PARAMETER:
+        // No const parameters.
+        UNREACHABLE();
+        break;
+      case Slot::LOCAL:
+        // Detect const reinitialization by checking for the hole value.
+        __ lw(a1, MemOperand(fp, SlotOffset(slot)));
+        __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+        __ Branch(&skip, ne, a1, Operand(t0));
+        __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+        break;
+      case Slot::CONTEXT: {
+        __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+        __ lw(a2, ContextOperand(a1, slot->index()));
+        __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+        __ Branch(&skip, ne, a2, Operand(t0));
+        __ sw(result_register(), ContextOperand(a1, slot->index()));
+        int offset = Context::SlotOffset(slot->index());
+        __ mov(a3, result_register());  // Preserve the stored value in v0.
+        __ RecordWrite(a1, Operand(offset), a3, a2);
+        break;
+      }
+      case Slot::LOOKUP:
+        __ push(result_register());
+        __ li(a0, Operand(slot->var()->name()));
+        __ Push(cp, a0);  // Context and name.
+        __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+        break;
+    }
+    __ bind(&skip);
+
+  } else if (var->mode() != Variable::CONST) {
+    // Perform the assignment for non-const variables.  Const assignments
+    // are simply skipped.
+    Slot* slot = var->AsSlot();
+    switch (slot->type()) {
+      case Slot::PARAMETER:
+      case Slot::LOCAL:
+        // Perform the assignment.
+        __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+        break;
+
+      case Slot::CONTEXT: {
+        MemOperand target = EmitSlotSearch(slot, a1);
+        // Perform the assignment and issue the write barrier.
+        __ sw(result_register(), target);
+        // RecordWrite may destroy all its register arguments.
+        __ mov(a3, result_register());
+        int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+        __ RecordWrite(a1, Operand(offset), a2, a3);
+        break;
+      }
+
+      case Slot::LOOKUP:
+        // Call the runtime for the assignment.
+        __ push(v0);  // Value.
+        __ li(a1, Operand(slot->var()->name()));
+        __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+        __ Push(cp, a1, a0);  // Context, name, strict mode.
+        __ CallRuntime(Runtime::kStoreContextSlot, 4);
+        break;
+    }
+  }
 }
 
 
 void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
-  UNIMPLEMENTED_MIPS();
+  // Assignment to a property, using a named store IC.
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(prop != NULL);
+  ASSERT(prop->key()->AsLiteral() != NULL);
+
+  // If the assignment starts a block of assignments to the same object,
+  // change to slow case to avoid the quadratic behavior of repeatedly
+  // adding fast properties.
+  if (expr->starts_initialization_block()) {
+    __ push(result_register());
+    __ lw(t0, MemOperand(sp, kPointerSize));  // Receiver is now under value.
+    __ push(t0);
+    __ CallRuntime(Runtime::kToSlowProperties, 1);
+    __ pop(result_register());
+  }
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  __ mov(a0, result_register());  // Load the value.
+  __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+  // Load the receiver into a1. Leave a copy on the stack if needed for
+  // turning the receiver into the fast case.
+  if (expr->ends_initialization_block()) {
+    __ lw(a1, MemOperand(sp));
+  } else {
+    __ pop(a1);
+  }
+
+  Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+
+  // If the assignment ends an initialization block, revert to fast case.
+  if (expr->ends_initialization_block()) {
+    __ push(v0);  // Result of assignment, saved even if not needed.
+    // Receiver is under the result value.
+    __ lw(t0, MemOperand(sp, kPointerSize));
+    __ push(t0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(v0);
+    __ Drop(1);
+  }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
-  UNIMPLEMENTED_MIPS();
+  // Assignment to a property, using a keyed store IC.
+
+  // If the assignment starts a block of assignments to the same object,
+  // change to slow case to avoid the quadratic behavior of repeatedly
+  // adding fast properties.
+  if (expr->starts_initialization_block()) {
+    __ push(result_register());
+    // Receiver is now under the key and value.
+    __ lw(t0, MemOperand(sp, 2 * kPointerSize));
+    __ push(t0);
+    __ CallRuntime(Runtime::kToSlowProperties, 1);
+    __ pop(result_register());
+  }
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  // Call keyed store IC.
+  // The arguments are:
+  // - a0 is the value,
+  // - a1 is the key,
+  // - a2 is the receiver.
+  __ mov(a0, result_register());
+  __ pop(a1);  // Key.
+  // Load the receiver into a2. Leave a copy on the stack if needed for
+  // turning the receiver into the fast case.
+  if (expr->ends_initialization_block()) {
+    __ lw(a2, MemOperand(sp));
+  } else {
+    __ pop(a2);
+  }
+
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+
+  // If the assignment ends an initialization block, revert to fast case.
+  if (expr->ends_initialization_block()) {
+    __ push(v0);  // Result of assignment, saved even if not needed.
+    // Receiver is under the result value.
+    __ lw(t0, MemOperand(sp, kPointerSize));
+    __ push(t0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(v0);
+    __ Drop(1);
+  }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitProperty(Property* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+
+  if (key->IsPropertyName()) {
+    VisitForAccumulatorValue(expr->obj());
+    EmitNamedPropertyLoad(expr);
+    context()->Plug(v0);
+  } else {
+    VisitForStackValue(expr->obj());
+    VisitForAccumulatorValue(expr->key());
+    __ pop(a1);
+    EmitKeyedPropertyLoad(expr);
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
+  // Code common for calls using the IC.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+    __ li(a2, Operand(name));
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+  EmitCallIC(ic, mode, expr->id());
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key,
-                                            RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
+                                            Expression* key) {
+  // Load the key.
+  VisitForAccumulatorValue(key);
+
+  // Swap the name of the function and the receiver on the stack to follow
+  // the calling convention for call ICs.
+  __ pop(a1);
+  __ push(v0);
+  __ push(a1);
+
+  // Code common for calls using the IC.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
+  __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, v0);  // Drop the key still on the stack.
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
+  // Code common for calls using the call stub.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub stub(arg_count, in_loop, flags);
+  __ CallStub(&stub);
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, v0);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+                                                      int arg_count) {
+  // Push copy of the first argument or undefined if it doesn't exist.
+  if (arg_count > 0) {
+    __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+  } else {
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+  }
+  __ push(a1);
+
+  // Push the receiver of the enclosing function and do runtime call.
+  __ lw(a1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+  __ push(a1);
+  // Push the strict mode flag.
+  __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+  __ push(a1);
+
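+  // Four arguments are now on the stack: the function (pushed by the
+  // caller), the copy of the first argument, the enclosing receiver, and the
+  // strict mode flag.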
+  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
+                 : Runtime::kResolvePossiblyDirectEval, 4);
 }
 
 
 void FullCodeGenerator::VisitCall(Call* expr) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
+  Comment cmnt(masm_, "[ Call");
+  Expression* fun = expr->expression();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+
+  if (var != NULL && var->is_possibly_eval()) {
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+
+    { PreservePositionScope pos_scope(masm()->positions_recorder());
+      VisitForStackValue(fun);
+      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+      __ push(a2);  // Reserved receiver slot.
+
+      // Push the arguments.
+      for (int i = 0; i < arg_count; i++) {
+        VisitForStackValue(args->at(i));
+      }
+      // If we know that eval can only be shadowed by eval-introduced
+      // variables we attempt to load the global eval function directly
+      // in generated code. If we succeed, there is no need to perform a
+      // context lookup in the runtime system.
+      Label done;
+      if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+        Label slow;
+        EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+                                          NOT_INSIDE_TYPEOF,
+                                          &slow);
+        // Push the function and resolve eval.
+        __ push(v0);
+        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+        __ jmp(&done);
+        __ bind(&slow);
+      }
+
+      // Push copy of the function (found below the arguments) and
+      // resolve eval.
+      __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+      __ push(a1);
+      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+      if (done.is_linked()) {
+        __ bind(&done);
+      }
+
+      // The runtime call returns a pair of values in v0 (function) and
+      // v1 (receiver). Touch up the stack with the right values.
+      __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+      __ sw(v1, MemOperand(sp, arg_count * kPointerSize));
+    }
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ CallStub(&stub);
+    RecordJSReturnSite(expr);
+    // Restore context register.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    context()->DropAndPlug(1, v0);
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // Push global object as receiver for the call IC.
+    __ lw(a0, GlobalObjectOperand());
+    __ push(a0);
+    EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+  } else if (var != NULL && var->AsSlot() != NULL &&
+             var->AsSlot()->type() == Slot::LOOKUP) {
+    // Call to a lookup slot (dynamically introduced variable).
+    Label slow, done;
+
+    { PreservePositionScope scope(masm()->positions_recorder());
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
+                                      NOT_INSIDE_TYPEOF,
+                                      &slow,
+                                      &done);
+    }
+
+    __ bind(&slow);
+    // Call the runtime to find the function to call (returned in v0)
+    // and the object holding it (returned in v1).
+    __ push(context_register());
+    __ li(a2, Operand(var->name()));
+    __ push(a2);
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ Push(v0, v1);  // Function, receiver.
+
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      Label call;
+      __ Branch(&call);
+      __ bind(&done);
+      // Push function.
+      __ push(v0);
+      // Push global receiver.
+      __ lw(a1, GlobalObjectOperand());
+      __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+      __ push(a1);
+      __ bind(&call);
+    }
+
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot. That object could be the hole if the
+    // receiver is implicitly the global object.
+    EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
+  } else if (fun->AsProperty() != NULL) {
+    // Call to an object property.
+    Property* prop = fun->AsProperty();
+    Literal* key = prop->key()->AsLiteral();
+    if (key != NULL && key->handle()->IsSymbol()) {
+      // Call to a named property, use call IC.
+      { PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(prop->obj());
+      }
+      EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+    } else {
+      // Call to a keyed property.
+      // For a synthetic property use a keyed load IC followed by a function
+      // call; for a regular property use EmitKeyedCallWithIC.
+      if (prop->is_synthetic()) {
+        // Do not visit the object and key subexpressions (they are shared
+        // by all occurrences of the same rewritten parameter).
+        ASSERT(prop->obj()->AsVariableProxy() != NULL);
+        ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
+        Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
+        MemOperand operand = EmitSlotSearch(slot, a1);
+        __ lw(a1, operand);
+
+        ASSERT(prop->key()->AsLiteral() != NULL);
+        ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
+        __ li(a0, Operand(prop->key()->AsLiteral()->handle()));
+
+        // Record source code position for IC call.
+        SetSourcePosition(prop->position());
+
+        Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+        EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+        __ lw(a1, GlobalObjectOperand());
+        __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+        __ Push(v0, a1);  // Function, receiver.
+        EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+      } else {
+        { PreservePositionScope scope(masm()->positions_recorder());
+          VisitForStackValue(prop->obj());
+        }
+        EmitKeyedCallWithIC(expr, prop->key());
+      }
+    }
+  } else {
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(fun);
+    }
+    // Load global receiver object.
+    __ lw(a1, GlobalObjectOperand());
+    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+    __ push(a1);
+    // Emit function call.
+    EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+  }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  ASSERT(expr->return_is_recorded_);
+#endif
 }
 
 
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+
+  // Push constructor on the stack.  If it's not a function it's used as
+  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+  // ignored.
+  VisitForStackValue(expr->expression());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(expr->position());
+
+  // Load function and argument count into a1 and a0.
+  __ li(a0, Operand(arg_count));
+  __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+
+  Handle<Code> construct_builtin =
+      isolate()->builtins()->JSConstructCall();
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
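+  // With kSmiTag == 0, the value is a smi exactly when the tag bit is clear.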
+  __ And(t0, v0, Operand(kSmiTagMask));
+  Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
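+  // A non-negative smi has both the tag bit and the sign bit clear, so one
+  // mask tests both conditions at once.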
+  __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
+  Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ LoadRoot(at, Heap::kNullValueRootIndex);
+  __ Branch(if_true, eq, v0, Operand(at));
+  __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+  __ Branch(if_false, ne, at, Operand(zero_reg));
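+  // JS objects have instance types in [FIRST_JS_OBJECT_TYPE,
+  // LAST_JS_OBJECT_TYPE]; the lower bound is checked here, the upper below.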
+  __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  __ Branch(if_false, lt, a1, Operand(FIRST_JS_OBJECT_TYPE));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(le, a1, Operand(LAST_JS_OBJECT_TYPE), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(ge, a1, Operand(FIRST_JS_OBJECT_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  if (FLAG_debug_code) __ AbortIfSmi(v0);
+
+  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ Branch(if_true, ne, t0, Operand(zero_reg));
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kHashTableMapRootIndex);
+  __ Branch(if_false, eq, a2, Operand(t0));
+
+  // Look for valueOf symbol in the descriptor array, and indicate false if
+  // found. The type is not checked, so if it is a transition it is a false
+  // negative.
+  __ LoadInstanceDescriptors(a1, t0);
+  __ lw(a3, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // t0: descriptor array
+  // a3: length of descriptor array
+  // Calculate the end of the descriptor array.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kPointerSize == 4);
+  __ Addu(a2, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
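+  // The smi length (value << kSmiTagSize) becomes a byte offset when shifted
+  // left by kPointerSizeLog2 - kSmiTagSize.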
+  __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a2, a2, t1);
+
+  // Calculate location of the first key name.
+  __ Addu(t0,
+          t0,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+                  DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is
+  // the valueOf symbol, the result is false.
+  Label entry, loop;
+  // The use of t2 to store the valueOf symbol assumes that it is not
+  // otherwise used in the loop below.
+  __ li(t2, Operand(FACTORY->value_of_symbol()));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ lw(a3, MemOperand(t0, 0));
+  __ Branch(if_false, eq, a3, Operand(t2));
+  __ Addu(t0, t0, Operand(kPointerSize));
+  __ bind(&entry);
+  __ Branch(&loop, ne, t0, Operand(a2));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is
+  // false.
+  __ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+  __ JumpIfSmi(a2, if_false);
+  __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ lw(a3, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+  __ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ Branch(if_false, ne, a2, Operand(a3));
+
+  // Set the bit in the map to indicate that it has been checked safe for
+  // default valueOf, and set the result to true.
+  __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ jmp(if_true);
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a2);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(if_false);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a1, Operand(JS_ARRAY_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  // Get the frame pointer for the calling frame.
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&check_frame_marker, ne,
+            a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ pop(a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in a1 and the formal
+  // parameter count in a0.
+  VisitForAccumulatorValue(args->at(0));
+  __ mov(a1, v0);
+  __ li(a0, Operand(Smi::FromInt(scope()->num_parameters())));
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 0);
+
+  Label exit;
+  // Get the number of formal parameters.
+  __ li(v0, Operand(Smi::FromInt(scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&exit, ne, a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForAccumulatorValue(args->at(0));
+
+  // If the object is a smi, we return null.
+  __ JumpIfSmi(v0, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  __ GetObjectType(v0, v0, a1);  // Map is now in v0.
+  __ Branch(&null, lt, a1, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ Branch(&function, eq, a1, Operand(JS_FUNCTION_TYPE));
+
+  // Check if the constructor in the map is a function.
+  __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
+
+  // v0 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ lw(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ Branch(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex);
+  __ jmp(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ LoadRoot(v0, Heap::kObject_symbolRootIndex);
+  __ jmp(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(v0, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+    VisitForStackValue(args->at(1));
+    VisitForStackValue(args->at(2));
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 0);
+
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+
+  // Save the new heap number in callee-saved register s0, since
+  // we call out to external C code below.
+  __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(s0, a1, a2, t6, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
+
+  __ bind(&slow_allocate_heapnumber);
+
+  // Allocate a heap number.
+  __ CallRuntime(Runtime::kNumberAlloc, 0);
+  __ mov(s0, v0);   // Save result in s0, so it survives the C function call.
+
+  __ bind(&heapnumber_allocated);
+
+  // Convert 32 random bits in v0 to 0.(32 random bits) in a double
+  // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
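+  // A double with exponent 0x413 and random mantissa bits r equals
+  // (1 + r * 2^-52) * 2^20 = 2^20 + r * 2^-32, so the subtraction leaves
+  // r / 2^32, uniform in [0, 1).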
+  if (CpuFeatures::IsSupported(FPU)) {
+    __ PrepareCallCFunction(1, a0);
+    __ li(a0, Operand(ExternalReference::isolate_address()));
+    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+    CpuFeatures::Scope scope(FPU);
+    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+    __ li(a1, Operand(0x41300000));
+    // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+    __ Move(f12, v0, a1);
+    // Move 0x4130000000000000 to FPU.
+    __ Move(f14, zero_reg, a1);
+    // Subtract and store the result in the heap number.
+    __ sub_d(f0, f12, f14);
+    __ sdc1(f0, MemOperand(s0, HeapNumber::kValueOffset - kHeapObjectTag));
+    __ mov(v0, s0);
+  } else {
+    __ PrepareCallCFunction(2, a0);
+    __ mov(a0, s0);
+    __ li(a1, Operand(ExternalReference::isolate_address()));
+    __ CallCFunction(
+        ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
+  }
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub;
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub;
+  ASSERT(args->length() == 4);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  VisitForStackValue(args->at(3));
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label done;
+  // If the object is a smi, return the object.
+  __ JumpIfSmi(v0, &done);
+  // If the object is not a value type, return the object.
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
+
+  __ lw(v0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the arguments on the stack and call the runtime function.
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  MathPowStub stub;
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));  // Load the object.
+  VisitForAccumulatorValue(args->at(1));  // Load the value.
+  __ pop(a1);  // v0 = value. a1 = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ JumpIfSmi(a1, &done);
+
+  // If the object is not a value type, return the value.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
+
+  // Store the value.
+  __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  VisitForStackValue(args->at(0));
+
+  NumberToStringStub stub;
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label done;
+  StringCharFromCodeGenerator generator(v0, a1);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(a1);
 }
 
 
 void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+  __ mov(a0, result_register());
+
+  Register object = a1;
+  Register index = a0;
+  Register scratch = a2;
+  Register result = v0;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharCodeAtGenerator generator(object,
+                                      index,
+                                      scratch,
+                                      result,
+                                      &need_conversion,
+                                      &need_conversion,
+                                      &index_out_of_range,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // NaN.
+  __ LoadRoot(result, Heap::kNanValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Load the undefined value into the result register, which will
+  // trigger conversion.
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+  __ mov(a0, result_register());
+
+  Register object = a1;
+  Register index = a0;
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+  Register result = v0;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &need_conversion,
+                                  &need_conversion,
+                                  &index_out_of_range,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ li(result, Operand(Smi::FromInt(0)));
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  StringCompareStub stub;
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime function.
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallRuntime(Runtime::kMath_sqrt, 1);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
+  }
+  VisitForAccumulatorValue(args->last());  // Function.
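+  // The receiver and the call arguments are now on the stack; the function
+  // itself is in the accumulator (v0).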
+
+  // InvokeFunction requires the function in a1. Move it in there.
+  __ mov(a1, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(a1, count, CALL_FUNCTION);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  RegExpConstructResultStub stub;
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  Label done;
+  Label slow_case;
+  Register object = a0;
+  Register index1 = a1;
+  Register index2 = a2;
+  Register elements = a3;
+  Register scratch1 = t0;
+  Register scratch2 = t1;
+
+  __ lw(object, MemOperand(sp, 2 * kPointerSize));
+  // Fetch the map and check if array is in fast case.
+  // Check that object doesn't require security checks and
+  // has no indexed interceptor.
+  __ GetObjectType(object, scratch1, scratch2);
+  __ Branch(&slow_case, ne, scratch2, Operand(JS_ARRAY_TYPE));
+  // Map is now in scratch1.
+
+  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+  __ And(scratch2, scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+  __ Branch(&slow_case, ne, scratch2, Operand(zero_reg));
+
+  // Check the object's elements are in fast case and writable.
+  __ lw(elements, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow_case, ne, scratch1, Operand(scratch2));
+
+  // Check that both indices are smis.
+  __ lw(index1, MemOperand(sp, 1 * kPointerSize));
+  __ lw(index2, MemOperand(sp, 0));
+  __ JumpIfNotBothSmi(index1, index2, &slow_case);
+
+  // Check that both indices are valid.
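+  // The length is a non-negative smi, so a single unsigned comparison
+  // rejects both out-of-range and negative (sign bit set) indices.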
+  Label not_hi;
+  __ lw(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
+  __ Branch(&slow_case, ls, scratch1, Operand(index1));
+  __ Branch(&not_hi, NegateCondition(hi), scratch1, Operand(index1));
+  __ Branch(&slow_case, ls, scratch1, Operand(index2));
+  __ bind(&not_hi);
+
+  // Bring the address of the elements into index1 and index2.
+  __ Addu(scratch1, elements,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
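+  // A smi is the value shifted left by kSmiTagSize, so shifting it left by
+  // kPointerSizeLog2 - kSmiTagSize converts a smi index to a byte offset.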
+  __ sll(index1, index1, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(index1, scratch1, index1);
+  __ sll(index2, index2, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(index2, scratch1, index2);
+
+  // Swap elements.
+  __ lw(scratch1, MemOperand(index1, 0));
+  __ lw(scratch2, MemOperand(index2, 0));
+  __ sw(scratch1, MemOperand(index2, 0));
+  __ sw(scratch2, MemOperand(index1, 0));
+
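+  // Write barriers record old-to-new pointers, so they can be skipped
+  // entirely when the elements array itself is in new space.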
+  Label new_space;
+  __ InNewSpace(elements, scratch1, eq, &new_space);
+  // Possible optimization: do a check that both values are Smis
+  // (or them and test against Smi mask).
+
+  __ mov(scratch1, elements);
+  __ RecordWriteHelper(elements, index1, scratch2);
+  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
+
+  __ bind(&new_space);
+  // We are done. Drop elements from the stack, and return undefined.
+  __ Drop(3);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&slow_case);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      isolate()->global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+    context()->Plug(v0);
+    return;
+  }
+
+  VisitForAccumulatorValue(args->at(1));
+
+  Register key = v0;
+  Register cache = a1;
+  __ lw(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ lw(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+  __ lw(cache,
+        ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ lw(cache,
+        FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+  Label done, not_found;
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
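+  // The cache is a FixedArray of (key, value) pairs; the finger field holds
+  // the smi index of the most recently hit key, which is probed inline
+  // before falling back to the runtime.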
+  __ lw(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // a2 now holds finger offset as a smi.
+  __ Addu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // a3 now points to the start of fixed array elements.
+  __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(a3, a3, at);
+  // a3 now points to key of indexed element of cache.
+  __ lw(a2, MemOperand(a3));
+  __ Branch(&not_found, ne, key, Operand(a2));
+
+  __ lw(v0, MemOperand(a3, kPointerSize));
+  __ Branch(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ Push(cache, key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  Register right = v0;
+  Register left = a1;
+  Register tmp = a2;
+  Register tmp2 = a3;
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));  // Result (right) in v0.
+  __ pop(left);
+
+  Label done, fail, ok;
+  __ Branch(&ok, eq, left, Operand(right));
+  // Fail if either is a non-HeapObject.
+  __ And(tmp, left, Operand(right));
+  __ And(at, tmp, Operand(kSmiTagMask));
+  __ Branch(&fail, eq, at, Operand(zero_reg));
+  __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+  __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
+  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ Branch(&fail, ne, tmp, Operand(tmp2));
+  __ lw(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
+  __ lw(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
+  __ Branch(&ok, eq, tmp, Operand(tmp2));
+  __ bind(&fail);
+  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+  __ jmp(&done);
+  __ bind(&ok);
+  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+  __ bind(&done);
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
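+  // The hash field caches either the string's hash or a valid array index;
+  // the masked bits are zero only in the array-index case.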
+  __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
+  __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotString(v0);
+  }
+
+  __ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
+  __ IndexFromHash(v0, v0);
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  Label bailout, done, one_char_separator, long_separator,
+      non_trivial_array, not_size_one_array, loop,
+      empty_separator_loop, one_char_separator_loop,
+      one_char_separator_loop_entry, long_separator_loop;
+
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(1));
+  VisitForAccumulatorValue(args->at(0));
+
+  // All aliases of the same register have disjoint lifetimes.
+  Register array = v0;
+  Register elements = no_reg;  // Will be v0.
+  Register result = no_reg;  // Will be v0.
+  Register separator = a1;
+  Register array_length = a2;
+  Register result_pos = no_reg;  // Will be a2.
+  Register string_length = a3;
+  Register string = t0;
+  Register element = t1;
+  Register elements_end = t2;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+  Register scratch3 = t4;
+  Register scratch4 = v1;
+
+  // Separator operand is on the stack.
+  __ pop(separator);
+
+  // Check that the array is a JSArray.
+  __ JumpIfSmi(array, &bailout);
+  __ GetObjectType(array, scratch1, scratch2);
+  __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
+
+  // Check that the array has fast elements.
+  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+  __ And(scratch3, scratch2, Operand(1 << Map::kHasFastElements));
+  __ Branch(&bailout, eq, scratch3, Operand(zero_reg));
+
+  // If the array has length zero, return the empty string.
+  __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+  __ SmiUntag(array_length);
+  __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
+  __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+  __ Branch(&done);
+
+  __ bind(&non_trivial_array);
+
+  // Get the FixedArray containing array's elements.
+  elements = array;
+  __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+  array = no_reg;  // End of array's live range.
+
+  // Check that all array elements are sequential ASCII strings, and
+  // accumulate the sum of their lengths, as a smi-encoded value.
+  __ mov(string_length, zero_reg);
+  __ Addu(element,
+          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(elements_end, array_length, kPointerSizeLog2);
+  __ Addu(elements_end, element, elements_end);
+  // Loop condition: while (element < elements_end).
+  // Live values in registers:
+  //   elements: Fixed array of strings.
+  //   array_length: Length of the fixed array of strings (not smi).
+  //   separator: Separator string.
+  //   string_length: Accumulated sum of string lengths (smi).
+  //   element: Current array element.
+  //   elements_end: Array end.
+  if (FLAG_debug_code) {
+    __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
+        array_length, Operand(zero_reg));
+  }
+  __ bind(&loop);
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ JumpIfSmi(string, &bailout);
+  __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+  __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+  __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
+  __ BranchOnOverflow(&bailout, scratch3);
+  __ Branch(&loop, lt, element, Operand(elements_end));
+
+  // If array_length is 1, return elements[0], a string.
+  __ Branch(&not_size_one_array, ne, array_length, Operand(1));
+  __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+  __ Branch(&done);
+
+  __ bind(&not_size_one_array);
+
+  // Live values in registers:
+  //   separator: Separator string.
+  //   array_length: Length of the array.
+  //   string_length: Sum of string lengths (smi).
+  //   elements: FixedArray of strings.
+
+  // Check that the separator is a flat ASCII string.
+  __ JumpIfSmi(separator, &bailout);
+  __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+  // Add (separator length times array_length) - separator length to the
+  // string_length to get the length of the result string. array_length is not
+  // smi but the other values are, so the result is a smi.
+  __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+  __ Subu(string_length, string_length, Operand(scratch1));
+  __ Mult(array_length, scratch1);
+  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+  // zero.
+  __ mfhi(scratch2);
+  __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
+  __ mflo(scratch2);
+  __ And(scratch3, scratch2, Operand(0x80000000));
+  __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
+  __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
+  __ BranchOnOverflow(&bailout, scratch3);
+  __ SmiUntag(string_length);
+
+  // Get first element in the array to free up the elements register to be used
+  // for the result.
+  __ Addu(element,
+          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  result = elements;  // End of live range for elements.
+  elements = no_reg;
+  // Live values in registers:
+  //   element: First array element.
+  //   separator: Separator string.
+  //   string_length: Length of result string (not smi).
+  //   array_length: Length of the array.
+  __ AllocateAsciiString(result,
+                         string_length,
+                         scratch1,
+                         scratch2,
+                         elements_end,
+                         &bailout);
+  // Prepare for looping. Set up elements_end to end of the array. Set
+  // result_pos to the position of the result where to write the first
+  // character.
+  __ sll(elements_end, array_length, kPointerSizeLog2);
+  __ Addu(elements_end, element, elements_end);
+  result_pos = array_length;  // End of live range for array_length.
+  array_length = no_reg;
+  __ Addu(result_pos,
+          result,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+  // Check the length of the separator.
+  __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+  __ li(at, Operand(Smi::FromInt(1)));
+  __ Branch(&one_char_separator, eq, scratch1, Operand(at));
+  __ Branch(&long_separator, gt, scratch1, Operand(at));
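+  // Neither branch taken: the separator is empty, so fall through.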
+
+  // Empty separator case.
+  __ bind(&empty_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+
+  // Copy next array element to the result.
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
+  ASSERT(result.is(v0));
+  __ Branch(&done);
+
+  // One-character separator case.
+  __ bind(&one_char_separator);
+  // Replace separator with its ascii character value.
+  __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+  __ jmp(&one_char_separator_loop_entry);
+
+  __ bind(&one_char_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Single separator ascii char (in lower byte).
+
+  // Copy the separator character to the result.
+  __ sb(separator, MemOperand(result_pos));
+  __ Addu(result_pos, result_pos, 1);
+
+  // Copy next array element to the result.
+  __ bind(&one_char_separator_loop_entry);
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
+  ASSERT(result.is(v0));
+  __ Branch(&done);
+
+  // Long separator case (separator is more than one character). Entry is at the
+  // label long_separator below.
+  __ bind(&long_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Separator string.
+
+  // Copy the separator to the result.
+  __ lw(string_length, FieldMemOperand(separator, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string,
+          separator,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+
+  __ bind(&long_separator);
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
+  ASSERT(result.is(v0));
+  __ Branch(&done);
+
+  __ bind(&bailout);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  UNIMPLEMENTED_MIPS();
+  Handle<String> name = expr->name();
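+  // Runtime calls whose name starts with '_' (e.g. %_IsSmi) are backed by
+  // the inline runtime functions emitted above.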
+  if (name->length() > 0 && name->Get(0) == '_') {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+
+  if (expr->is_jsruntime()) {
+    // Prepare for calling JS runtime function.
+    __ lw(a0, GlobalObjectOperand());
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
+    __ push(a0);
+  }
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  if (expr->is_jsruntime()) {
+    // Call the JS runtime function.
+    __ li(a2, Operand(expr->name()));
+    RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+    Handle<Code> ic =
+        isolate()->stub_cache()->ComputeCallInitialize(arg_count,
+                                                       NOT_IN_LOOP,
+                                                       mode);
+    EmitCallIC(ic, mode, expr->id());
+    // Restore context register.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // Call the C runtime function.
+    __ CallRuntime(expr->function(), arg_count);
+  }
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
-  UNIMPLEMENTED_MIPS();
+  switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* prop = expr->expression()->AsProperty();
+      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+
+      if (prop != NULL) {
+        if (prop->is_synthetic()) {
+          // Result of deleting parameters is false, even when they rewrite
+          // to accesses on the arguments object.
+          context()->Plug(false);
+        } else {
+          VisitForStackValue(prop->obj());
+          VisitForStackValue(prop->key());
+          __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+          __ push(a1);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+          context()->Plug(v0);
+        }
+      } else if (var != NULL) {
+        // Delete of an unqualified identifier is disallowed in strict mode
+        // but "delete this" is.
+        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        if (var->is_global()) {
+          __ lw(a2, GlobalObjectOperand());
+          __ li(a1, Operand(var->name()));
+          __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
+          __ Push(a2, a1, a0);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+          context()->Plug(v0);
+        } else if (var->AsSlot() != NULL &&
+                   var->AsSlot()->type() != Slot::LOOKUP) {
+          // Result of deleting non-global, non-dynamic variables is false.
+          // The subexpression does not have side effects.
+          context()->Plug(false);
+        } else {
+          // Non-global variable.  Call the runtime to try to delete from the
+          // context where the variable was introduced.
+          __ push(context_register());
+          __ li(a2, Operand(var->name()));
+          __ push(a2);
+          __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+          context()->Plug(v0);
+        }
+      } else {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        context()->Plug(true);
+      }
+      break;
+    }
+
+    case Token::VOID: {
+      Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+      VisitForEffect(expr->expression());
+      context()->Plug(Heap::kUndefinedValueRootIndex);
+      break;
+    }
+
+    case Token::NOT: {
+      Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+      if (context()->IsEffect()) {
+        // Unary NOT has no side effects so it's only necessary to visit the
+        // subexpression.  Match the optimizing compiler by not branching.
+        VisitForEffect(expr->expression());
+      } else {
+        Label materialize_true, materialize_false;
+        Label* if_true = NULL;
+        Label* if_false = NULL;
+        Label* fall_through = NULL;
+
+        // Notice that the labels are swapped.
+        context()->PrepareTest(&materialize_true, &materialize_false,
+                               &if_false, &if_true, &fall_through);
+        if (context()->IsTest()) ForwardBailoutToChild(expr);
+        VisitForControl(expr->expression(), if_true, if_false, fall_through);
+        context()->Plug(if_false, if_true);  // Labels swapped.
+      }
+      break;
+    }
+
+    case Token::TYPEOF: {
+      Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+      { StackValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
+      __ CallRuntime(Runtime::kTypeof, 1);
+      context()->Plug(v0);
+      break;
+    }
+
+    case Token::ADD: {
+      Comment cmt(masm_, "[ UnaryOperation (ADD)");
+      VisitForAccumulatorValue(expr->expression());
+      Label no_conversion;
+      __ JumpIfSmi(result_register(), &no_conversion);
+      __ mov(a0, result_register());
+      ToNumberStub convert_stub;
+      __ CallStub(&convert_stub);
+      __ bind(&no_conversion);
+      context()->Plug(result_register());
+      break;
+    }
+
+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
+      break;
+
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+  Comment cmt(masm_, comment);
+  bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+  UnaryOverwriteMode overwrite =
+      can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+  UnaryOpStub stub(expr->op(), overwrite);
+  // UnaryOpStub expects the argument to be in a0.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  __ mov(a0, result_register());
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CountOperation");
+  SetSourcePosition(expr->position());
+
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // as the left-hand side.
+  if (!expr->expression()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
+
+  // Expression can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->expression()->AsProperty();
+  // In case of a property we use the uninitialized expression context
+  // of the key to detect a named property.
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate expression and get value.
+  if (assign_type == VARIABLE) {
+    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+  } else {
+    // Reserve space for result of postfix operation.
+    if (expr->is_postfix() && !context()->IsEffect()) {
+      __ li(at, Operand(Smi::FromInt(0)));
+      __ push(at);
+    }
+    if (assign_type == NAMED_PROPERTY) {
+      // Put the object both on the stack and in the accumulator.
+      VisitForAccumulatorValue(prop->obj());
+      __ push(v0);
+      EmitNamedPropertyLoad(prop);
+    } else {
+      if (prop->is_arguments_access()) {
+        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
+        __ lw(v0, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
+        __ push(v0);
+        __ li(v0, Operand(prop->key()->AsLiteral()->handle()));
+      } else {
+        VisitForStackValue(prop->obj());
+        VisitForAccumulatorValue(prop->key());
+      }
+      __ lw(a1, MemOperand(sp, 0));
+      __ push(v0);
+      EmitKeyedPropertyLoad(prop);
+    }
+  }
+
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load has a side effect.
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
+  }
+
+  // Call ToNumber only if operand is not a smi.
+  Label no_conversion;
+  __ JumpIfSmi(v0, &no_conversion);
+  __ mov(a0, v0);
+  ToNumberStub convert_stub;
+  __ CallStub(&convert_stub);
+  __ bind(&no_conversion);
+
+  // Save result for postfix expressions.
+  if (expr->is_postfix()) {
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          __ push(v0);
+          break;
+        case NAMED_PROPERTY:
+          __ sw(v0, MemOperand(sp, kPointerSize));
+          break;
+        case KEYED_PROPERTY:
+          __ sw(v0, MemOperand(sp, 2 * kPointerSize));
+          break;
+      }
+    }
+  }
+  __ mov(a0, result_register());
+
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
+  int count_value = expr->op() == Token::INC ? 1 : -1;
+  __ li(a1, Operand(Smi::FromInt(count_value)));
+
+  if (ShouldInlineSmiCase(expr->op())) {
+    __ AdduAndCheckForOverflow(v0, a0, a1, t0);
+    __ BranchOnOverflow(&stub_call, t0);  // Do stub on overflow.
+
+    // We could eliminate this smi check if we split the code at
+    // the first smi check before calling ToNumber.
+    patch_site.EmitJumpIfSmi(v0, &done);
+    __ bind(&stub_call);
+  }
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
+  BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+  __ bind(&done);
+
+  // Store the value returned in v0.
+  switch (assign_type) {
+    case VARIABLE:
+      if (expr->is_postfix()) {
+        { EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN);
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(v0);
+        }
+        // For all contexts except EffectContext we have the result on
+        // top of the stack.
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN);
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(v0);
+      }
+      break;
+    case NAMED_PROPERTY: {
+      __ mov(a0, result_register());  // Value.
+      __ li(a2, Operand(prop->key()->AsLiteral()->handle()));  // Name.
+      __ pop(a1);  // Receiver.
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(v0);
+      }
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ mov(a0, result_register());  // Value.
+      __ pop(a1);  // Key.
+      __ pop(a2);  // Receiver.
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(v0);
+      }
+      break;
+    }
+  }
 }
 
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
-  UNIMPLEMENTED_MIPS();
+  VariableProxy* proxy = expr->AsVariableProxy();
+  if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+    Comment cmnt(masm_, "Global variable");
+    __ lw(a0, GlobalObjectOperand());
+    __ li(a2, Operand(proxy->name()));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    // Use a regular load, not a contextual load, to avoid a reference
+    // error.
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+    PrepareForBailout(expr, TOS_REG);
+    context()->Plug(v0);
+  } else if (proxy != NULL &&
+             proxy->var()->AsSlot() != NULL &&
+             proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
+    Label done, slow;
+
+    // Generate code for loading from variables potentially shadowed
+    // by eval-introduced variables.
+    Slot* slot = proxy->var()->AsSlot();
+    EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
+
+    __ bind(&slow);
+    __ li(a0, Operand(proxy->name()));
+    __ Push(cp, a0);
+    __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
+    __ bind(&done);
+
+    context()->Plug(v0);
+  } else {
+    // This expression cannot throw a reference error at the top level.
+    context()->HandleExpression(expr);
+  }
 }
 
 
@@ -660,50 +4050,305 @@
                                           Label* if_true,
                                           Label* if_false,
                                           Label* fall_through) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  if (op != Token::EQ && op != Token::EQ_STRICT) return false;
+
+  // Check for the pattern: typeof <expression> == <string literal>.
+  Literal* right_literal = right->AsLiteral();
+  if (right_literal == NULL) return false;
+  Handle<Object> right_literal_value = right_literal->handle();
+  if (!right_literal_value->IsString()) return false;
+  UnaryOperation* left_unary = left->AsUnaryOperation();
+  if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
+  Handle<String> check = Handle<String>::cast(right_literal_value);
+
+  { AccumulatorValueContext context(this);
+    VisitForTypeofValue(left_unary->expression());
+  }
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  if (check->Equals(isolate()->heap()->number_symbol())) {
+    __ JumpIfSmi(v0, if_true);
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->string_symbol())) {
+    __ JumpIfSmi(v0, if_false);
+    // Check for undetectable objects => false.
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(eq, a1, Operand(zero_reg),
+          if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    __ JumpIfSmi(v0, if_false);
+    // Check for undetectable objects => true.
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->function_symbol())) {
+    __ JumpIfSmi(v0, if_false);
+    __ GetObjectType(v0, a1, v0);  // Leave map in a1.
+    Split(ge, v0, Operand(FIRST_FUNCTION_CLASS_TYPE),
+        if_true, if_false, fall_through);
+
+  } else if (check->Equals(isolate()->heap()->object_symbol())) {
+    __ JumpIfSmi(v0, if_false);
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    // Check for JS objects => true.
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_false, lo, a1, Operand(FIRST_JS_OBJECT_TYPE));
+    __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+    __ Branch(if_false, hs, a1, Operand(FIRST_FUNCTION_CLASS_TYPE));
+    // Check for undetectable objects => false.
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  } else {
+    if (if_false != fall_through) __ jmp(if_false);
+  }
+
+  return true;
 }
 
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CompareOperation");
+  SetSourcePosition(expr->position());
+
+  // Always perform the comparison for its control flow.  Pack the result
+  // into the expression's context after the comparison is performed.
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  Token::Value op = expr->op();
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+  if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+    context()->Plug(if_true, if_false);
+    return;
+  }
+
+  VisitForStackValue(expr->left());
+  switch (op) {
+    case Token::IN:
+      VisitForStackValue(expr->right());
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+      Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
+      break;
+
+    case Token::INSTANCEOF: {
+      VisitForStackValue(expr->right());
+      InstanceofStub stub(InstanceofStub::kNoFlags);
+      __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      // The stub returns 0 for true.
+      Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
+      break;
+    }
+
+    default: {
+      VisitForAccumulatorValue(expr->right());
+      Condition cc = eq;
+      bool strict = false;
+      switch (op) {
+        case Token::EQ_STRICT:
+          strict = true;
+          // Fall through.
+        case Token::EQ:
+          cc = eq;
+          __ mov(a0, result_register());
+          __ pop(a1);
+          break;
+        case Token::LT:
+          cc = lt;
+          __ mov(a0, result_register());
+          __ pop(a1);
+          break;
+        case Token::GT:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = lt;
+          __ mov(a1, result_register());
+          __ pop(a0);
+          break;
+        case Token::LTE:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = ge;
+          __ mov(a1, result_register());
+          __ pop(a0);
+          break;
+        case Token::GTE:
+          cc = ge;
+          __ mov(a0, result_register());
+          __ pop(a1);
+          break;
+        case Token::IN:
+        case Token::INSTANCEOF:
+        default:
+          UNREACHABLE();
+      }
+
+      bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
+      if (inline_smi_code) {
+        Label slow_case;
+        __ Or(a2, a0, Operand(a1));
+        patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+        Split(cc, a1, Operand(a0), if_true, if_false, NULL);
+        __ bind(&slow_case);
+      }
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      EmitCallIC(ic, &patch_site, expr->id());
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
+    }
+  }
+
+  // Convert the result of the comparison into one expected for this
+  // expression's context.
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CompareToNull");
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ mov(a0, result_register());
+  __ LoadRoot(a1, Heap::kNullValueRootIndex);
+  if (expr->is_strict()) {
+    Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
+  } else {
+    __ Branch(if_true, eq, a0, Operand(a1));
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ Branch(if_true, eq, a0, Operand(a1));
+    __ And(at, a0, Operand(kSmiTagMask));
+    __ Branch(if_false, eq, at, Operand(zero_reg));
+    // It can be an undetectable object.
+    __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  }
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  context()->Plug(v0);
 }
 
 
 Register FullCodeGenerator::result_register() {
-  UNIMPLEMENTED_MIPS();
   return v0;
 }
 
 
 Register FullCodeGenerator::context_register() {
-  UNIMPLEMENTED_MIPS();
   return cp;
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   RelocInfo::Mode mode,
+                                   unsigned ast_id) {
+  ASSERT(mode == RelocInfo::CODE_TARGET ||
+         mode == RelocInfo::CODE_TARGET_CONTEXT);
+  Counters* counters = isolate()->counters();
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+      __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
+      break;
+    case Code::KEYED_LOAD_IC:
+      __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
+      break;
+    case Code::STORE_IC:
+      __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
+      break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
+      break;
+    default:
+      break;
+  }
+  if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
+    __ Call(ic, mode);
+  } else {
+    ASSERT(mode == RelocInfo::CODE_TARGET);
+    mode = RelocInfo::CODE_TARGET_WITH_ID;
+    __ CallWithAstId(ic, mode, ast_id);
+  }
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   JumpPatchSite* patch_site,
+                                   unsigned ast_id) {
+  Counters* counters = isolate()->counters();
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+      __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
+      break;
+    case Code::KEYED_LOAD_IC:
+      __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
+      break;
+    case Code::STORE_IC:
+      __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
+      break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
+      break;
+    default:
+      break;
+  }
+
+  if (ast_id == kNoASTId) {
+    __ Call(ic, RelocInfo::CODE_TARGET);
+  } else {
+    __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+  }
+  if (patch_site != NULL && patch_site->is_bound()) {
+    patch_site->EmitPatchInfo();
+  } else {
+    __ nop();  // Signals no inlined code.
+  }
 }
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ sw(value, MemOperand(fp, frame_offset));
 }
 
 
 void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(dst, ContextOperand(cp, context_index));
 }
 
 
@@ -711,12 +4356,28 @@
 // Non-local control flow support.
 
 void FullCodeGenerator::EnterFinallyBlock() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!result_register().is(a1));
+  // Store result register while executing finally block.
+  __ push(result_register());
+  // Cook the return address in ra to the stack (smi-encoded Code* delta).
+  __ Subu(a1, ra, Operand(masm_->CodeObject()));
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  ASSERT_EQ(0, kSmiTag);
+  __ Addu(a1, a1, Operand(a1));  // Convert to smi.
+  __ push(a1);
 }
 
 
 void FullCodeGenerator::ExitFinallyBlock() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!result_register().is(a1));
+  // Pop the cooked return address from the stack.
+  __ pop(a1);
+  // Restore the result register from the stack.
+  __ pop(result_register());
+  // Uncook the return address and return.
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  __ sra(a1, a1, 1);  // Un-smi-tag value.
+  __ Addu(at, a1, Operand(masm_->CodeObject()));
+  __ Jump(at);
 }
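The cook/uncook pair stores the return address as a smi-tagged offset from the code object so it stays valid if the code is relocated while the finally block runs. A C++ sketch of the arithmetic, assuming 32-bit pointers and the one-bit smi tag the asserts above check:

    #include <stdint.h>

    // Sketch only; mirrors the Subu/Addu and sra/Addu sequences above.
    static intptr_t CookReturnAddress(intptr_t ra, intptr_t code_object) {
      intptr_t delta = ra - code_object;  // relocation-safe offset
      return delta + delta;               // smi-tag (shift left by one)
    }

    static intptr_t UncookReturnAddress(intptr_t cooked,
                                        intptr_t code_object) {
      return (cooked >> 1) + code_object;  // un-smi-tag and rebase
    }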
 
 
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index fa8a7bb..12c81c2 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "code-stubs.h"
 #include "ic-inl.h"
 #include "runtime.h"
@@ -48,52 +48,783 @@
 #define __ ACCESS_MASM(masm)
 
 
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+                                            Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+                                                  Register receiver,
+                                                  Register elements,
+                                                  Register scratch0,
+                                                  Register scratch1,
+                                                  Label* miss) {
+  // Register usage:
+  //   receiver: holds the receiver on entry and is unchanged.
+  //   elements: holds the property dictionary on fall through.
+  // Scratch registers:
+  //   scratch0: used to hold the receiver map.
+  //   scratch1: used to hold the receiver instance type, receiver bit mask
+  //     and elements map.
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // Check that the receiver is a valid JS object.
+  __ GetObjectType(receiver, scratch0, scratch1);
+  __ Branch(miss, lt, scratch1, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
+
+  // Check that the global object does not require access checks.
+  __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
+  __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+                           (1 << Map::kHasNamedInterceptor)));
+  __ Branch(miss, ne, scratch1, Operand(zero_reg));
+
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
+  __ Branch(miss, ne, scratch1, Operand(scratch0));
+}
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label
+//           is done.
+// result:   Register for the result. It is only updated if a jump to the
+//           miss label is not done. It may alias 'elements' or 'name', in
+//           which case the aliased register is clobbered on success.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss,
+                                   Register elements,
+                                   Register name,
+                                   Register result,
+                                   Register scratch1,
+                                   Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
+
+  // If probing finds an entry check that the value is a normal
+  // property.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at,
+         scratch1,
+         Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Get the value at the masked, scaled index and return.
+  __ lw(result,
+        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label
+//           is done.
+// value:    The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+                                    Label* miss,
+                                    Register elements,
+                                    Register name,
+                                    Register value,
+                                    Register scratch1,
+                                    Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
+
+  // If probing finds an entry in the dictionary check that the value
+  // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask
+      = (PropertyDetails::TypeField::mask() |
+         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Store the value at the masked, scaled index and return.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+  __ sw(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ mov(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1);
+}
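Both dictionary helpers test the property-details word, which is stored as a smi; the store variant additionally rejects read-only properties. A hedged C++ sketch of that predicate, where the mask arguments are assumptions standing in for PropertyDetails::TypeField::mask() and the encoded READ_ONLY attribute bit:

    #include <stdint.h>

    // Sketch: a dictionary store is allowed only for writable NORMAL
    // properties (TypeField == 0, READ_ONLY clear); kSmiTagSize == 1.
    static bool IsWritableNormalProperty(uint32_t details_smi,
                                         uint32_t type_mask,       // assumed
                                         uint32_t read_only_bit) { // assumed
      uint32_t mask = (type_mask | read_only_bit) << 1;  // skip smi tag
      return (details_smi & mask) == 0;
    }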
+
+
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+                                         Label* miss,
+                                         Register elements,
+                                         Register key,
+                                         Register result,
+                                         Register reg0,
+                                         Register reg1,
+                                         Register reg2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'elements' or 'key'.
+  //            Unchanged on bailout so 'elements' and 'key' can be used
+  //            in further computation.
+  //
+  // Scratch registers:
+  //
+  // reg0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // reg1 - Used to hold the capacity mask of the dictionary.
+  //
+  // reg2 - Used for the index into the dictionary.
+  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
+  Label done;
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  __ nor(reg1, reg0, zero_reg);
+  __ sll(at, reg0, 15);
+  __ addu(reg0, reg1, at);
+
+  // hash = hash ^ (hash >> 12);
+  __ srl(at, reg0, 12);
+  __ xor_(reg0, reg0, at);
+
+  // hash = hash + (hash << 2);
+  __ sll(at, reg0, 2);
+  __ addu(reg0, reg0, at);
+
+  // hash = hash ^ (hash >> 4);
+  __ srl(at, reg0, 4);
+  __ xor_(reg0, reg0, at);
+
+  // hash = hash * 2057;
+  __ li(reg1, Operand(2057));
+  __ mul(reg0, reg0, reg1);
+
+  // hash = hash ^ (hash >> 16);
+  __ srl(at, reg0, 16);
+  __ xor_(reg0, reg0, at);
+
+  // Compute the capacity mask.
+  __ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+  __ sra(reg1, reg1, kSmiTagSize);
+  __ Subu(reg1, reg1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  static const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use reg2 for index calculations and keep the hash intact in reg0.
+    __ mov(reg2, reg0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      __ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
+    }
+    __ and_(reg2, reg2, reg1);
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(NumberDictionary::kEntrySize == 3);
+    __ sll(at, reg2, 1);  // 2x.
+    __ addu(reg2, reg2, at);  // reg2 = reg2 * 3.
+
+    // Check if the key is identical to the name.
+    __ sll(at, reg2, kPointerSizeLog2);
+    __ addu(reg2, elements, at);
+
+    __ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
+    if (i != kProbes - 1) {
+      __ Branch(&done, eq, key, Operand(at));
+    } else {
+      __ Branch(miss, ne, key, Operand(at));
+    }
+  }
+
+  __ bind(&done);
+  // Check that the value is a normal property.
+  // reg2: elements + (index * kPointerSize).
+  const int kDetailsOffset =
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  __ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
+  __ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      NumberDictionary::kElementsStartOffset + kPointerSize;
+  __ lw(result, FieldMemOperand(reg2, kValueOffset));
+}
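The unrolled sequence above computes the same mixing function as ComputeIntegerHash in utils.h before probing the dictionary with NumberDictionary::GetProbeOffset(i). A C++ sketch of the hash alone, assuming 32-bit unsigned arithmetic:

    #include <stdint.h>

    // Must stay in sync with ComputeIntegerHash in utils.h.
    static uint32_t IntegerHash(uint32_t hash) {
      hash = ~hash + (hash << 15);  // nor/sll/addu above
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // li/mul above
      hash = hash ^ (hash >> 16);
      return hash;
    }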
+
+
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
 
 
 void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss,
+                                         support_wrappers);
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
 
 
 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register map,
+                                           Register scratch,
+                                           int interceptor_bit,
+                                           Label* slow) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field.
+  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+  __ Branch(slow, ne, at, Operand(zero_reg));
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+                                  Register receiver,
+                                  Register key,
+                                  Register elements,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  Register result,
+                                  Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - holds the elements of the receiver on exit.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used by further computation.
+  //
+  // Scratch registers:
+  //
+  // scratch1 - used to hold elements map and elements length.
+  //            Holds the elements map if not_fast_array branch is taken.
+  //
+  // scratch2 - used to hold the loaded value.
+
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode (not dictionary).
+    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ Branch(not_fast_array, ne, scratch1, Operand(at));
+  } else {
+    __ AssertFastElements(elements);
+  }
+
+  // Check that the key (index) is within bounds.
+  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(out_of_range, hs, key, Operand(scratch1));
+
+  // Fast case: Do the load.
+  __ Addu(scratch1, elements,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // The key is a smi.
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(at, at, scratch1);
+  __ lw(scratch2, MemOperand(at));
+
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ Branch(out_of_range, eq, scratch2, Operand(at));
+  __ mov(result, scratch2);
+}
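GenerateFastArrayLoad treats the_hole as a miss so holes fall back to the full GetProperty lookup, where the prototype chain may still supply the element. A sketch of the contract with untagged indices for clarity and the_hole as an assumed sentinel value:

    #include <stdint.h>

    // Sketch: load from a fast FixedArray; holes and out-of-range keys
    // must take the out_of_range path.
    static bool FastArrayLoad(const intptr_t* elements, uint32_t length,
                              uint32_t key, intptr_t the_hole,  // assumed
                              intptr_t* result) {
      if (key >= length) return false;      // out_of_range
      intptr_t value = elements[key];
      if (value == the_hole) return false;  // consult GetProperty instead
      *result = value;
      return true;
    }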
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if a key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+                                   Register key,
+                                   Register map,
+                                   Register hash,
+                                   Label* index_string,
+                                   Label* not_symbol) {
+  // The key is not a smi.
+  // Is it a string?
+  __ GetObjectType(key, map, hash);
+  __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE));
+
+  // Is the string an array index, with cached numeric value?
+  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+  __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
+  __ Branch(index_string, eq, at, Operand(zero_reg));
+
+  // Is the string a symbol?
+  // map: key map
+  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  ASSERT(kSymbolTag != 0);
+  __ And(at, hash, Operand(kIsSymbolMask));
+  __ Branch(not_symbol, eq, at, Operand(zero_reg));
 }
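A compact sketch of the classification GenerateKeyStringCheck performs, assuming the encodings the code references (kContainsCachedArrayIndexMask clear means the string caches a numeric array index; kIsSymbolMask marks symbols):

    #include <stdint.h>

    enum KeyKind { kCachedArrayIndex, kSymbol, kNotSymbol };

    // Sketch only; the mask values are assumptions mirroring the
    // constants used above.
    static KeyKind ClassifyKey(uint32_t instance_type, uint32_t hash_field,
                               uint32_t first_nonstring_type,
                               uint32_t cached_index_mask,
                               uint32_t is_symbol_mask) {
      if (instance_type >= first_nonstring_type) return kNotSymbol;
      if ((hash_field & cached_index_mask) == 0) return kCachedArrayIndex;
      if ((instance_type & is_symbol_mask) != 0) return kSymbol;
      return kNotSymbol;
    }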
 
 
 // Defined in ic.cc.
 Object* CallIC_Miss(Arguments args);
 
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                          int argc,
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a1    : receiver
+  //  -- a2    : name
+  // -----------------------------------
+  Label number, non_number, non_string, boolean, probe, miss;
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC,
+                                         extra_ic_state,
+                                         NORMAL,
+                                         argc);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a1, a2, a3, t0, t1);
+
+  // If the stub cache probing failed, the receiver might be a value.
+  // For value objects, we use the map of the prototype objects for
+  // the corresponding JSValue for the cache and that is what we need
+  // to probe.
+  //
+  // Check for number.
+  __ JumpIfSmi(a1, &number, t1);
+  __ GetObjectType(a1, a3, a3);
+  __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
+  __ bind(&number);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::NUMBER_FUNCTION_INDEX, a1);
+  __ Branch(&probe);
+
+  // Check for string.
+  __ bind(&non_number);
+  __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::STRING_FUNCTION_INDEX, a1);
+  __ Branch(&probe);
+
+  // Check for boolean.
+  __ bind(&non_string);
+  __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+  __ Branch(&boolean, eq, a1, Operand(t0));
+  __ LoadRoot(t1, Heap::kFalseValueRootIndex);
+  __ Branch(&miss, ne, a1, Operand(t1));
+  __ bind(&boolean);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
+
+  // Probe the stub cache for the value object.
+  __ bind(&probe);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a1, a2, a3, t0, t1);
+
+  __ bind(&miss);
 }
 
 
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+                                     int argc,
+                                     Label* miss,
+                                     Register scratch) {
+  // a1: function
+
+  // Check that the value isn't a smi.
+  __ JumpIfSmi(a1, miss);
+
+  // Check that the value is a JSFunction.
+  __ GetObjectType(a1, scratch, scratch);
+  __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+}
+
+
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver of the function from the stack into a1.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
+
+  // a0: elements
+  // Search the dictionary - put result in register a1.
+  GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
+
+  GenerateFunctionTailCall(masm, argc, &miss, t0);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+}
+
+
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+
+  if (id == IC::kCallIC_Miss) {
+    __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
+  } else {
+    __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
+  }
+
+  // Get the receiver of the function from the stack.
+  __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+  __ EnterInternalFrame();
+
+  // Push the receiver and the name of the function.
+  __ Push(a3, a2);
+
+  // Call the entry.
+  __ li(a0, Operand(2));
+  __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+
+  // Move result to a1 and leave the internal frame.
+  __ mov(a1, v0);
+  __ LeaveInternalFrame();
+
+  // Check if the receiver is a global object of some sort.
+  // This can happen only for regular CallIC but not KeyedCallIC.
+  if (id == IC::kCallIC_Miss) {
+    Label invoke, global;
+    __ lw(a2, MemOperand(sp, argc * kPointerSize));
+    __ andi(t0, a2, kSmiTagMask);
+    __ Branch(&invoke, eq, t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, a3);
+    __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
+    __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
+
+    // Patch the receiver on the stack.
+    __ bind(&global);
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a2, MemOperand(sp, argc * kPointerSize));
+    __ bind(&invoke);
+  }
+  // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  ParameterCount actual(argc);
+  __ InvokeFunction(a1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    call_kind);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
+}
+
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack into a1.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+  GenerateMiss(masm, argc, extra_ic_state);
 }
 
 
 void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallNormal(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
 }
 
 
 void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
 }
 
 
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack into a1.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  Label do_call, slow_call, slow_load, slow_reload_receiver;
+  Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+  Label index_smi, index_string;
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(a2, &check_string);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
+
+  GenerateFastArrayLoad(
+      masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
+
+  __ bind(&do_call);
+  // receiver in a1 is not used after this point.
+  // a2: key
+  // a1: function
+
+  GenerateFunctionTailCall(masm, argc, &slow_call, a0);
+
+  __ bind(&check_number_dictionary);
+  // a2: key
+  // a3: elements map
+  // t0: elements pointer
+  // Check whether the elements is a number dictionary.
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&slow_load, ne, a3, Operand(at));
+  __ sra(a0, a2, kSmiTagSize);
+  // a0: untagged index
+  GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1);
+  __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
+  __ jmp(&do_call);
+
+  __ bind(&slow_load);
+  // This branch is taken when calling KeyedCallIC_Miss is neither required
+  // nor beneficial.
+  __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
+  __ EnterInternalFrame();
+  __ push(a2);  // Save the key.
+  __ Push(a1, a2);  // Pass the receiver and the key.
+  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+  __ pop(a2);  // Restore the key.
+  __ LeaveInternalFrame();
+  __ mov(a1, v0);
+  __ jmp(&do_call);
+
+  __ bind(&check_string);
+  GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
+
+  // The key is known to be a symbol.
+  // If the receiver is a regular JS object with slow properties then do
+  // a quick inline probe of the receiver's dictionary.
+  // Otherwise do the monomorphic cache probe.
+  GenerateKeyedLoadReceiverCheck(
+      masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
+
+  __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+  __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
+
+  GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
+  __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
+  __ jmp(&do_call);
+
+  __ bind(&lookup_monomorphic_cache);
+  __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
+  // Fall through on miss.
+
+  __ bind(&slow_call);
+  // This branch is taken if:
+  // - the receiver requires boxing or access check,
+  // - the key is neither smi nor symbol,
+  // - the value loaded is not a function,
+  // - there is hope that the runtime will create a monomorphic call stub,
+  //   that will get fetched next time.
+  __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
+  GenerateMiss(masm, argc);
+
+  __ bind(&index_string);
+  __ IndexFromHash(a3, a2);
+  // Now jump to the place where smi keys are handled.
+  __ jmp(&index_smi);
 }
 
 
 void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Check if the name is a string.
+  Label miss;
+  __ JumpIfSmi(a2, &miss);
+  __ IsObjectJSStringType(a2, a0, &miss);
+
+  GenerateCallNormal(masm, argc);
+  __ bind(&miss);
+  GenerateMiss(masm, argc);
 }
 
 
@@ -101,122 +832,626 @@
 Object* LoadIC_Miss(Arguments args);
 
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a0, a2, a3, t0, t1);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
 }
 
 
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
+
+  // a1: elements
+  GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
+  __ Ret();
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);
+
+  __ mov(a3, a0);
+  __ Push(a3, a2);
+
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
 
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
 
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
+  __ Push(a1, a0);
 
+  // Perform tail call to the entry.
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
+      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
 
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
-
-
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+
+  __ Push(a1, a0);
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 }
 
 
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label slow, check_string, index_smi, index_string, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register key = a0;
+  Register receiver = a1;
+
+  Isolate* isolate = masm->isolate();
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_string);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+
+  // Check the "has fast elements" bit in the receiver's map which is
+  // now in a2.
+  __ lbu(a3, FieldMemOperand(a2, Map::kBitField2Offset));
+  __ And(at, a3, Operand(1 << Map::kHasFastElements));
+  __ Branch(&check_number_dictionary, eq, at, Operand(zero_reg));
+
+  GenerateFastArrayLoad(
+      masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
+
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
+  __ Ret();
+
+  __ bind(&check_number_dictionary);
+  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
+
+  // Check whether the elements is a number dictionary.
+  // a0: key
+  // a3: elements map
+  // t0: elements
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&slow, ne, a3, Operand(at));
+  __ sra(a2, a0, kSmiTagSize);
+  GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1);
+  __ Ret();
+
+  // Slow case, key and receiver still in a0 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
+                      1,
+                      a2,
+                      a3);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_string);
+  GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow);
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary.
+  __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&probe_dictionary, eq, t0, Operand(at));
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the string hash.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
+  __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ sra(at, t0, String::kHashShift);
+  __ xor_(a3, a3, at);
+  __ And(a3, a3, Operand(KeyedLookupCache::kCapacityMask));
+
+  // Load the key (consisting of map and symbol) from the cache and
+  // check for match.
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(isolate);
+  __ li(t0, Operand(cache_keys));
+  __ sll(at, a3, kPointerSizeLog2 + 1);
+  __ addu(t0, t0, at);
+  __ lw(t1, MemOperand(t0));  // Load the cached map.
+  __ Addu(t0, t0, Operand(kPointerSize));  // Point t0 at the cached symbol.
+  __ Branch(&slow, ne, a2, Operand(t1));
+  __ lw(t1, MemOperand(t0));
+  __ Branch(&slow, ne, a0, Operand(t1));
+
+  // Get field offset.
+  // a0     : key
+  // a1     : receiver
+  // a2     : receiver's map
+  // a3     : lookup cache index
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+  __ li(t0, Operand(cache_field_offsets));
+  __ sll(at, a3, kPointerSizeLog2);
+  __ addu(at, t0, at);
+  __ lw(t1, MemOperand(at));
+  __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+  __ Subu(t1, t1, t2);
+  __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+
+  // Load in-object property.
+  __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ addu(t2, t2, t1);  // Index from start of object.
+  __ Subu(a1, a1, Operand(kHeapObjectTag));  // Remove the heap tag.
+  __ sll(at, t2, kPointerSizeLog2);
+  __ addu(at, a1, at);
+  __ lw(v0, MemOperand(at));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+                      1,
+                      a2,
+                      a3);
+  __ Ret();
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+  __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ sll(t0, t1, kPointerSizeLog2);
+  __ Addu(t0, t0, a1);
+  __ lw(v0, MemOperand(t0));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+                      1,
+                      a2,
+                      a3);
+  __ Ret();
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  // a1: receiver
+  // a0: key
+  // a3: elements
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
+  // Load the property to v0.
+  GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+                      1,
+                      a2,
+                      a3);
+  __ Ret();
+
+  __ bind(&index_string);
+  __ IndexFromHash(a3, key);
+  // Now jump to the place where smi keys are handled.
+  __ Branch(&index_smi);
 }
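The keyed-lookup-cache index computed above is just an XOR of the shifted map pointer and string hash, masked to the cache capacity. A sketch with the shift and mask constants treated as assumptions standing in for the KeyedLookupCache values:

    #include <stdint.h>

    // Sketch of the index computed by the sra/sra/xor_/And sequence.
    static uint32_t KeyedLookupCacheIndex(uint32_t map_bits,    // map ptr
                                          uint32_t hash_field,  // string hash
                                          int map_hash_shift,   // assumed
                                          int hash_shift,       // assumed
                                          uint32_t capacity_mask) {
      return ((map_bits >> map_hash_shift) ^ (hash_field >> hash_shift))
             & capacity_mask;
    }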
 
 
 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key (index)
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label miss;
+
+  Register receiver = a1;
+  Register index = a0;
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+  Register result = v0;
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  GenerateMiss(masm, false);
 }
 
 
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                               StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(a2, a1, a0);
+  __ li(a1, Operand(Smi::FromInt(NONE)));          // PropertyAttributes.
+  __ li(a0, Operand(Smi::FromInt(strict_mode)));   // Strict mode.
+  __ Push(a1, a0);
+
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
 
 
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                    StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label slow, fast, array, extra, exit;
+
+  // Register usage.
+  Register value = a0;
+  Register key = a1;
+  Register receiver = a2;
+  Register elements = a3;  // Elements array of the receiver.
+  // t0 plays the role of the ip register in the ARM version.
+  // t3-t4 are used as temporaries.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &slow);
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+
+  // Get the map of the object.
+  __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ Branch(&slow, ne, t0, Operand(zero_reg));
+  // Check if the object is a JS array or not.
+  __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
+
+  __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+  // Check that the object is some kind of JS object.
+  __ Branch(&slow, lt, t3, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Object case: Check key against length in the elements array.
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check that the object is in fast mode and writable.
+  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow, ne, t3, Operand(t0));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&fast, lo, key, Operand(t0));
+  // Fall through to slow if key >= length (both are smis).
+
+  // Slow case, handle jump to runtime.
+  __ bind(&slow);
+
+  // Entry registers are intact.
+  // a0: value.
+  // a1: key.
+  // a2: receiver.
+
+  GenerateRuntimeSetProperty(masm, strict_mode);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+
+  __ bind(&extra);
+  // Only support writing to array[array.length].
+  __ Branch(&slow, ne, key, Operand(t0));
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&slow, hs, key, Operand(t0));
+  // Calculate key + 1 as smi.
+  ASSERT_EQ(0, kSmiTag);
+  __ Addu(t3, key, Operand(Smi::FromInt(1)));
+  __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&fast);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is the length is always a smi.
+
+  __ bind(&array);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow, ne, t3, Operand(t0));
+
+  // Check the key against the length in the array.
+  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&extra, hs, key, Operand(t0));
+  // Fall through to fast case.
+
+  __ bind(&fast);
+  // Fast case, store the value to the elements backing store.
+  __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t4, t4, Operand(t1));
+  __ sw(value, MemOperand(t4));
+  // Skip write barrier if the written value is a smi.
+  __ JumpIfSmi(value, &exit);
+
+  // Update write barrier for the elements array address.
+  __ Subu(t3, t4, Operand(elements));
+
+  __ RecordWrite(elements, Operand(t3), t4, t5);
+  __ bind(&exit);
+
+  __ mov(v0, a0);  // Return the value written.
+  __ Ret();
 }
 
 
 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label slow;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(a1, &slow);
+
+  // Check that the key is an array index, that is Uint32.
+  __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
+  __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+  // Get the map of the receiver.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+
+  // Check that it has indexed interceptor and access checks
+  // are not enabled for this object.
+  __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
+  __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
+  // Everything is fine, call runtime.
+  __ Push(a1, a0);  // Receiver, key.
+
+  // Perform tail call to the entry.
+  __ TailCallExternalReference(ExternalReference(
+       IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
+
+  __ bind(&slow);
+  GenerateMiss(masm, false);
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(a2, a1, a0);
+
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  // We can't use MultiPush as the order of the registers is important.
+  __ Push(a2, a1, a0);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+
+  __ TailCallExternalReference(ref, 3, 1);
 }
 
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Get the receiver from the stack and probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC,
+                                         strict_mode);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a1, a2, a3, t0, t1);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  __ Push(a1, a2, a0);
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
+                                            masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
 }
 
 
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except external and pixel arrays, i.e. anything with
+  // elements of FixedArray type), but is currently restricted to JSArray.
+  // The value must be a number, but only smis are accepted here, as the
+  // most common case.
+
+  Label miss;
+
+  Register receiver = a1;
+  Register value = a0;
+  Register scratch = a3;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the object is a JS array.
+  __ GetObjectType(receiver, scratch, scratch);
+  __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Check that elements are FixedArray.
+  // We rely on StoreIC_ArrayLength below to deal with all types of
+  // fast elements (including COW).
+  __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+  __ GetObjectType(scratch, scratch, scratch);
+  __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+  // Check that value is a smi.
+  __ JumpIfNotSmi(value, &miss);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ Push(receiver, value);
+
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength),
+                                            masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+
+  __ bind(&miss);
+
+  GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
+
+  GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+  GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  __ Push(a1, a2, a0);
+
+  __ li(a1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
+  __ li(a0, Operand(Smi::FromInt(strict_mode)));
+  __ Push(a1, a0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
 
 
@@ -224,18 +1459,119 @@
 
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
-  UNIMPLEMENTED_MIPS();
-  return kNoCondition;
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return lt;
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return ge;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
 }
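
The reversed mappings are easiest to see with concrete operands. A minimal
standalone C++ sketch (an editorial aside, not part of the patch): swapping
the operands preserves the result while letting the left operand be
converted first, as ECMA-262 requires.

    #include <cassert>
    int main() {
      int a = 3, b = 2;
      assert((a > b) == (b < a));    // Token::GT with swapped operands: lt.
      assert((a <= b) == (b >= a));  // Token::LTE with swapped operands: ge.
      return 0;
    }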
 
 
 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
-  UNIMPLEMENTED_MIPS();
+  HandleScope scope;
+  Handle<Code> rewritten;
+  State previous_state = GetState();
+  State state = TargetState(previous_state, false, x, y);
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+
+  // Activate inlined smi code.
+  if (previous_state == UNINITIALIZED) {
+    PatchInlinedSmiCode(address());
+  }
 }
 
 
 void PatchInlinedSmiCode(Address address) {
-  // Currently there is no smi inlining in the MIPS full code generator.
+  Address andi_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an andi at, rx, #yyy,
+  // nothing was inlined.
+  Instr instr = Assembler::instr_at(andi_instruction_address);
+  if (!Assembler::IsAndImmediate(instr)) {
+    return;
+  }
+
+  // The delta gives the distance back to the start of the map check
+  // instruction; the branch condition there is flipped by the patch below.
+  int delta = Assembler::GetImmediate16(instr);
+  delta += Assembler::GetRs(instr) * kImm16Mask;
+  // If the delta is 0, the instruction is andi at, zero_reg, #0, which also
+  // signals that nothing was inlined.
+  if (delta == 0) {
+    return;
+  }
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
+           address, andi_instruction_address, delta);
+  }
+#endif
+
+  Address patch_address =
+      andi_instruction_address - delta * Instruction::kInstrSize;
+  Instr instr_at_patch = Assembler::instr_at(patch_address);
+  Instr branch_instr =
+      Assembler::instr_at(patch_address + Instruction::kInstrSize);
+  ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+  ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+  ASSERT(Assembler::IsBranch(branch_instr));
+  if (Assembler::IsBeq(branch_instr)) {
+    // This is patching a "jump if not smi" site to be active.
+    // Changing:
+    //   andi at, rx, 0
+    //   Branch <target>, eq, at, Operand(zero_reg)
+    // to:
+    //   andi at, rx, #kSmiTagMask
+    //   Branch <target>, ne, at, Operand(zero_reg)
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+    patcher.ChangeBranchCondition(ne);
+  } else {
+    ASSERT(Assembler::IsBne(branch_instr));
+    // This is patching a "jump if smi" site to be active.
+    // Changing:
+    //   andi at, rx, 0
+    //   Branch <target>, ne, at, Operand(zero_reg)
+    // to:
+    //   andi at, rx, #kSmiTagMask
+    //   Branch <target>, eq, at, Operand(zero_reg)
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+    patcher.ChangeBranchCondition(eq);
+  }
 }
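
How the marker instruction encodes the delta can be checked in isolation. A
standalone sketch (not part of the patch) of the decoding used above, where
kImm16Mask is 0xffff:

    #include <cassert>
    #include <cstdint>
    int main() {
      const uint32_t kImm16Mask = 0xffff;
      // delta = GetRs(instr) * kImm16Mask + GetImmediate16(instr).
      uint32_t small = 0 * kImm16Mask + 40;  // Fits in the immediate alone.
      uint32_t large = 1 * kImm16Mask + 40;  // The rs field extends the range.
      assert(small == 40);
      assert(large == 0xffff + 40);
      assert(0 * kImm16Mask + 0 == 0);       // andi at, zero_reg, 0: no patch.
      return 0;
    }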
 
 
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
deleted file mode 100644
index bd6d60b..0000000
--- a/src/mips/jump-target-mips.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
-    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
-    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-
-
-void JumpTarget::DoJump() {
-  UNIMPLEMENTED_MIPS();
-}
-
-// Original prototype for mips, needs arch-indep change. Leave out for now.
-// void JumpTarget::DoBranch(Condition cc, Hint ignored,
-//     Register src1, const Operand& src2) {
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::Call() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::DoBind() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-#undef BRANCH_ARGS_CHECK
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 345d912..2aec684 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index e11dfab..ebc1e43 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -78,7 +78,7 @@
 
   bool HasEnvironment() const {
     UNIMPLEMENTED();
-    return NULL;
+    return false;
   }
 
   virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
@@ -213,15 +213,13 @@
 
 class LChunk: public ZoneObject {
  public:
-  explicit LChunk(CompilationInfo* info, HGraph* graph) { }
+  explicit LChunk(HGraph* graph) { }
 
   HGraph* graph() const {
     UNIMPLEMENTED();
     return NULL;
   }
 
-  CompilationInfo* info() const { return NULL; }
-
   const ZoneList<LPointerMap*>* pointer_maps() const {
     UNIMPLEMENTED();
     return NULL;
@@ -271,6 +269,11 @@
 
   void MarkEmptyBlocks() { UNIMPLEMENTED(); }
 
+  CompilationInfo* info() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
 #ifdef DEBUG
   void Verify() { UNIMPLEMENTED(); }
 #endif
@@ -279,7 +282,7 @@
 
 class LChunkBuilder BASE_EMBEDDED {
  public:
-  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) { }
+  LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
 
   // Build the sequence for the graph.
   LChunk* Build() {
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index bd4ab48..8b342a2 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -25,29 +25,32 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <limits.h>  // For LONG_MIN, LONG_MAX
+#include <limits.h>  // For LONG_MIN, LONG_MAX.
 
 #include "v8.h"
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "runtime.h"
 
 namespace v8 {
 namespace internal {
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      code_object_(HEAP->undefined_value()) {
+      allow_stub_calls_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
-// Arguments macros
+// Arguments macros.
 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
 #define COND_ARGS cond, r1, r2
 
@@ -161,7 +164,7 @@
 void MacroAssembler::RecordWriteHelper(Register object,
                                        Register address,
                                        Register scratch) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // Check that the object is not in new space.
     Label not_in_new_space;
     InNewSpace(object, scratch, ne, &not_in_new_space);
@@ -190,6 +193,77 @@
   sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
 }
 
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+  // Safepoints expect a block of kNumSafepointRegisters values on the
+  // stack, so adjust the stack for unsaved registers.
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  ASSERT(num_unsaved >= 0);
+  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  MultiPush(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  MultiPop(kSafepointSavedRegisters);
+  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+}
+
+
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+  PushSafepointRegisters();
+  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
+    FPURegister reg = FPURegister::FromAllocationIndex(i);
+    sdc1(reg, MemOperand(sp, i * kDoubleSize));
+  }
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
+    FPURegister reg = FPURegister::FromAllocationIndex(i);
+    ldc1(reg, MemOperand(sp, i * kDoubleSize));
+  }
+  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+  PopSafepointRegisters();
+}
+
+
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
+                                                             Register dst) {
+  sw(src, SafepointRegistersAndDoublesSlot(dst));
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+  sw(src, SafepointRegisterSlot(dst));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+  lw(dst, SafepointRegisterSlot(src));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the highest encoding,
+  // which means that lowest encodings are closest to the stack pointer.
+  return kSafepointRegisterStackIndexMap[reg_code];
+}
+
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  // General purpose registers are pushed last on the stack.
+  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+  return MemOperand(sp, doubles_size + register_offset);
+}
+
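As a sanity check on the slot arithmetic, a standalone sketch (not part of
the patch) with an illustrative FPU register count; the real constants live
in the MIPS port headers:

    #include <cassert>
    int main() {
      const int kPointerSize = 4;
      const int kDoubleSize = 8;
      const int kNumAllocatableFPU = 12;  // Illustrative value only.
      // The doubles block sits between sp and the GPR slots, so a GPR slot
      // is offset by the whole doubles block.
      int doubles_size = kNumAllocatableFPU * kDoubleSize;
      int stack_index = 3;  // From kSafepointRegisterStackIndexMap[reg_code].
      assert(doubles_size + stack_index * kPointerSize == 96 + 12);
      return 0;
    }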
 
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
@@ -230,7 +304,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     li(object, Operand(BitCast<int32_t>(kZapValue)));
     li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
     li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
@@ -262,7 +336,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     li(object, Operand(BitCast<int32_t>(kZapValue)));
     li(address, Operand(BitCast<int32_t>(kZapValue)));
     li(scratch, Operand(BitCast<int32_t>(kZapValue)));
@@ -271,7 +345,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Allocation support
+// Allocation support.
 
 
 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
@@ -297,15 +371,15 @@
   lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
-    Push(holder_reg);  // Temporarily save holder on the stack.
+    push(holder_reg);  // Temporarily save holder on the stack.
     // Read the first word and compare to the global_context_map.
     lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
     LoadRoot(at, Heap::kGlobalContextMapRootIndex);
     Check(eq, "JSGlobalObject::global_context should be a global context.",
           holder_reg, Operand(at));
-    Pop(holder_reg);  // Restore holder.
+    pop(holder_reg);  // Restore holder.
   }
 
   // Check if both contexts are the same.
@@ -313,9 +387,9 @@
   Branch(&same_contexts, eq, scratch, Operand(at));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
-    Push(holder_reg);  // Temporarily save holder on the stack.
+    push(holder_reg);  // Temporarily save holder on the stack.
     mov(holder_reg, at);  // Move at to its holding place.
     LoadRoot(at, Heap::kNullValueRootIndex);
     Check(ne, "JSGlobalProxy::context() should not be null.",
@@ -326,7 +400,7 @@
     Check(eq, "JSGlobalObject::global_context should be a global context.",
           holder_reg, Operand(at));
     // Restore at is not needed. at is reloaded below.
-    Pop(holder_reg);  // Restore holder.
+    pop(holder_reg);  // Restore holder.
     // Restore at to holder's context.
     lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
   }
@@ -346,7 +420,7 @@
 
 
 // ---------------------------------------------------------------------------
-// Instruction macros
+// Instruction macros.
 
 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
@@ -500,6 +574,15 @@
 }
 
 
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
+  ASSERT(rt.is_reg());
+  ASSERT(!at.is(rs));
+  ASSERT(!at.is(rt.rm()));
+  li(at, -1);
+  xor_(rs, rt.rm(), at);
+}
+
+
 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     slt(rd, rs, rt.rm());
@@ -581,24 +664,13 @@
     }
     // We always need the same number of instructions, as we may need to
     // patch this code to load another value, which may take two instructions.
-    if (is_int16(j.imm32_)) {
-      nop();
-      addiu(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & kHiMask)) {
-      nop();
-      ori(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & kImm16Mask)) {
-      nop();
-      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
-    } else {
-      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
-      ori(rd, rd, (j.imm32_ & kImm16Mask));
-    }
+    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+    ori(rd, rd, (j.imm32_ & kImm16Mask));
   }
 }
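
With the two-instruction form now unconditional, a standalone sketch (not
part of the patch) confirming that the lui/ori split reassembles any 32-bit
constant:

    #include <cassert>
    #include <cstdint>
    int main() {
      const uint32_t kHiMask = 0xffff0000;
      const uint32_t kImm16Mask = 0x0000ffff;
      const int kLuiShift = 16;
      uint32_t imm = 0x12345678;
      uint32_t rd = ((imm & kHiMask) >> kLuiShift) << kLuiShift;  // lui rd, hi.
      rd |= (imm & kImm16Mask);                                   // ori rd, lo.
      assert(rd == imm);
      return 0;
    }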
 
 
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
 void MacroAssembler::stop(const char* msg) {
   // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
   // We use the 0x54321 value to be able to find it easily when reading memory.
@@ -727,11 +799,11 @@
   ASSERT(!rs.is(t9));
   ASSERT(!rs.is(t8));
 
-  // Save rs's MSB to t8
+  // Save rs's MSB to t8.
   And(t8, rs, 0x80000000);
   // Remove rs's MSB.
   And(t9, rs, 0x7FFFFFFF);
-  // Move t9 to fd
+  // Move t9 to fd.
   mtc1(t9, fd);
 
   // Convert fd to a real FP value.
@@ -839,7 +911,7 @@
   Subu(scratch2, scratch2, Operand(zero_exponent));
   // Dest already has a Smi zero.
   Branch(&done, lt, scratch2, Operand(zero_reg));
-  if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (!CpuFeatures::IsSupported(FPU)) {
     // We have a shifted exponent between 0 and 30 in scratch2.
     srl(dest, scratch2, HeapNumber::kExponentShift);
     // We now have the exponent in dest.  Subtract from 30 to get
@@ -848,7 +920,7 @@
     subu(dest, at, dest);
   }
   bind(&right_exponent);
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     // MIPS FPU instructions implementing double precision to integer
     // conversion using round to zero. Since the FP value was qualified
@@ -898,6 +970,102 @@
 }
 
 
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+                                                 Register input_high,
+                                                 Register input_low,
+                                                 Register scratch) {
+  Label done, normal_exponent, restore_sign;
+  // Extract the biased exponent in result.
+  Ext(result,
+      input_high,
+      HeapNumber::kExponentShift,
+      HeapNumber::kExponentBits);
+
+  // Check for Infinity and NaNs, which should return 0.
+  Subu(scratch, result, HeapNumber::kExponentMask);
+  movz(result, zero_reg, scratch);
+  Branch(&done, eq, scratch, Operand(zero_reg));
+
+  // Express exponent as delta to (number of mantissa bits + 31).
+  Subu(result,
+       result,
+       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+  // If the delta is strictly positive, all bits would be shifted away,
+  // which means that we can return 0.
+  Branch(&normal_exponent, le, result, Operand(zero_reg));
+  mov(result, zero_reg);
+  Branch(&done);
+
+  bind(&normal_exponent);
+  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+  // Calculate shift.
+  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+  // Save the sign.
+  Register sign = result;
+  result = no_reg;
+  And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+  // to check for this specific case.
+  Label high_shift_needed, high_shift_done;
+  Branch(&high_shift_needed, lt, scratch, Operand(32));
+  mov(input_high, zero_reg);
+  Branch(&high_shift_done);
+  bind(&high_shift_needed);
+
+  // Set the implicit 1 before the mantissa part in input_high.
+  Or(input_high,
+     input_high,
+     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+  // Shift the mantissa bits to the correct position.
+  // We don't need to clear non-mantissa bits as they will be shifted away.
+  // If they weren't, it would mean that the answer is in the 32-bit range.
+  sllv(input_high, input_high, scratch);
+
+  bind(&high_shift_done);
+
+  // Replace the shifted bits with bits from the lower mantissa word.
+  Label pos_shift, shift_done;
+  li(at, 32);
+  subu(scratch, at, scratch);
+  Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+  // Negate scratch.
+  Subu(scratch, zero_reg, scratch);
+  sllv(input_low, input_low, scratch);
+  Branch(&shift_done);
+
+  bind(&pos_shift);
+  srlv(input_low, input_low, scratch);
+
+  bind(&shift_done);
+  Or(input_high, input_high, Operand(input_low));
+  // Restore sign if necessary.
+  mov(scratch, sign);
+  result = sign;
+  sign = no_reg;
+  Subu(result, zero_reg, input_high);
+  movz(result, input_high, scratch);
+  bind(&done);
+}
+
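The behaviour being implemented is ECMA-262 ToInt32 for doubles outside the
int32 range: keep the low 32 bits of the integer value and reapply the sign.
A standalone host-C++ sketch of that contract (not part of the patch):

    #include <cassert>
    #include <cstdint>
    int main() {
      // 2^32 + 2 truncates to its low 32 bits.
      int64_t big = static_cast<int64_t>(4294967298.0);
      assert(static_cast<int32_t>(static_cast<uint32_t>(big)) == 2);
      // -(2^32 - 1): low 32 bits of the magnitude, then the sign restored.
      int64_t neg = static_cast<int64_t>(-4294967295.0);
      assert(static_cast<int32_t>(static_cast<uint32_t>(neg)) == 1);
      return 0;
    }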
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+                                         Register src,
+                                         int num_least_bits) {
+  Ext(dst, src, kSmiTagSize, num_least_bits);
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+                                           Register src,
+                                           int num_least_bits) {
+  And(dst, src, Operand((1 << num_least_bits) - 1));
+}
+
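The two helpers differ only in whether the smi tag has to be skipped. A
standalone sketch (not part of the patch), using the MIPS smi tag size of
one bit:

    #include <cassert>
    #include <cstdint>
    int main() {
      const int kSmiTagSize = 1;
      const int num_least_bits = 3;
      int32_t smi = 13 << kSmiTagSize;  // Smi encoding of 13 (tag bit clear).
      // GetLeastBitsFromSmi: extract the bits just above the tag.
      int32_t from_smi = (smi >> kSmiTagSize) & ((1 << num_least_bits) - 1);
      // GetLeastBitsFromInt32: a plain mask on the untagged value.
      int32_t from_int = 13 & ((1 << num_least_bits) - 1);
      assert(from_smi == from_int && from_int == 5);
      return 0;
    }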
+
+// Emulated conditional branches do not emit a nop in the branch delay slot.
 //
 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
@@ -937,7 +1105,7 @@
       case ne:
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (r2.is(zero_reg)) {
           bgtz(rs, offset);
@@ -1028,7 +1196,7 @@
         li(r2, rt);
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (rt.imm32_ == 0) {
           bgtz(rs, offset);
@@ -1170,7 +1338,7 @@
         offset = shifted_branch_offset(L, false);
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (r2.is(zero_reg)) {
           offset = shifted_branch_offset(L, false);
@@ -1276,7 +1444,7 @@
         offset = shifted_branch_offset(L, false);
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (rt.imm32_ == 0) {
           offset = shifted_branch_offset(L, false);
@@ -1444,7 +1612,7 @@
       bal(offset);
       break;
 
-    // Signed comparison
+    // Signed comparison.
     case greater:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
@@ -1539,7 +1707,7 @@
       bal(offset);
       break;
 
-    // Signed comparison
+    // Signed comparison.
     case greater:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
@@ -1642,7 +1810,7 @@
         Branch(2, NegateCondition(cond), rs, rt);
         j(target.imm32_);  // Will generate only one instruction.
       }
-    } else {  // MustUseReg(target)
+    } else {  // MustUseReg(target).
       li(t9, target);
       if (cond == cc_always) {
         jr(t9);
@@ -1658,15 +1826,28 @@
 }
 
 
+int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
+  return 4 * kInstrSize;
+}
+
+
+int MacroAssembler::CallSize(Register reg) {
+  return 2 * kInstrSize;
+}
+
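These fixed sizes presumably count the usual MIPS call sequences: a li
(lui/ori pair) plus jalr plus the delay-slot nop for a code handle, and jalr
plus the delay-slot nop for a register. A standalone sketch under that
assumption (not part of the patch):

    #include <cassert>
    int main() {
      const int kInstrSize = 4;      // Every MIPS instruction is four bytes.
      assert(4 * kInstrSize == 16);  // lui, ori, jalr, nop.
      assert(2 * kInstrSize == 8);   // jalr, nop.
      return 0;
    }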
+
 // Note: To call gcc-compiled C code on mips, you must call thru t9.
 void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   if (target.is_reg()) {
       jalr(target.rm());
-  } else {    // !target.is_reg()
+  } else {  // !target.is_reg().
     if (!MustUseReg(target.rmode_)) {
       jal(target.imm32_);
-    } else {  // MustUseReg(target)
+    } else {  // MustUseReg(target).
+      // Must record previous source positions before the
+      // li() generates a new code target.
+      positions_recorder()->WriteRecordedPositions();
       li(t9, target);
       jalr(t9);
     }
@@ -1690,7 +1871,7 @@
       Branch(2, NegateCondition(cond), rs, rt);
       jalr(target.rm());
     }
-  } else {    // !target.is_reg()
+  } else {  // !target.is_reg().
     if (!MustUseReg(target.rmode_)) {
       if (cond == cc_always) {
         jal(target.imm32_);
@@ -1714,6 +1895,20 @@
 }
 
 
+void MacroAssembler::CallWithAstId(Handle<Code> code,
+                                   RelocInfo::Mode rmode,
+                                   unsigned ast_id,
+                                   Condition cond,
+                                   Register r1,
+                                   const Operand& r2) {
+  ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
+  ASSERT(ast_id != kNoASTId);
+  ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+  ast_id_for_reloc_info_ = ast_id;
+  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
+}
+
+
 void MacroAssembler::Drop(int count,
                           Condition cond,
                           Register reg,
@@ -1779,13 +1974,6 @@
 }
 
 
-void MacroAssembler::Move(Register dst, Register src) {
-  if (!dst.is(src)) {
-    mov(dst, src);
-  }
-}
-
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 void MacroAssembler::DebugBreak() {
@@ -1800,7 +1988,7 @@
 
 
 // ---------------------------------------------------------------------------
-// Exception handling
+// Exception handling.
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
@@ -1868,6 +2056,159 @@
 }
 
 
+void MacroAssembler::Throw(Register value) {
+  // v0 is expected to hold the exception.
+  Move(v0, value);
+
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop the sp to the top of the handler.
+  li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+  lw(sp, MemOperand(a3));
+
+  // Restore the next handler and frame pointer, discard handler state.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  pop(a2);
+  sw(a2, MemOperand(a3));
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  MultiPop(a3.bit() | fp.bit());
+
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of a
+  // JS entry frame.
+  // Set cp to NULL if fp is NULL.
+  Label done;
+  Branch(USE_DELAY_SLOT, &done, eq, fp, Operand(zero_reg));
+  mov(cp, zero_reg);   // In branch delay slot.
+  lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  bind(&done);
+
+#ifdef DEBUG
+  // When emitting debug_code, set ra as return address for the jump.
+  // 5 instructions: add: 1, pop: 2, jump: 2.
+  const int kOffsetRaInstructions = 5;
+  Label find_ra;
+
+  if (emit_debug_code()) {
+    // Compute ra for the Jump(t9).
+    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+    // This branch-and-link sequence is needed to get the current PC on MIPS,
+    // saved to the ra register, then adjusted for the instruction count.
+    bal(&find_ra);  // bal exposes branch-delay slot.
+    nop();  // Branch delay slot nop.
+    bind(&find_ra);
+    addiu(ra, ra, kOffsetRaBytes);
+  }
+#endif
+
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  pop(t9);  // 2 instructions: lw, add sp.
+  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
+
+  if (emit_debug_code()) {
+    // Make sure that the expected number of instructions were generated.
+    ASSERT_EQ(kOffsetRaInstructions,
+              InstructionsGeneratedSince(&find_ra));
+  }
+}
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+                                      Register value) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // v0 is expected to hold the exception.
+  Move(v0, value);
+
+  // Drop sp to the top stack handler.
+  li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+  lw(sp, MemOperand(a3));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  lw(a2, MemOperand(sp, kStateOffset));
+  Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  lw(sp, MemOperand(sp, kNextOffset));
+  jmp(&loop);
+  bind(&done);
+
+  // Set the top handler address to the next handler past the current
+  // ENTRY handler.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  pop(a2);
+  sw(a2, MemOperand(a3));
+
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(
+        Isolate::k_external_caught_exception_address, isolate());
+    li(a0, Operand(false, RelocInfo::NONE));
+    li(a2, Operand(external_caught));
+    sw(a0, MemOperand(a2));
+
+    // Set pending exception and v0 to out of memory exception.
+    Failure* out_of_memory = Failure::OutOfMemoryException();
+    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+    li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                     isolate())));
+    sw(v0, MemOperand(a2));
+  }
+
+  // Stack layout at this point. See also StackHandlerConstants.
+  // sp ->   state (ENTRY)
+  //         fp
+  //         ra
+
+  // Discard handler state (a2 is not used) and restore frame pointer.
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  MultiPop(a2.bit() | fp.bit());  // a2: discarded state.
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of a
+  // JS entry frame.
+  Label cp_null;
+  Branch(USE_DELAY_SLOT, &cp_null, eq, fp, Operand(zero_reg));
+  mov(cp, zero_reg);   // In the branch delay slot.
+  lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  bind(&cp_null);
+
+#ifdef DEBUG
+  // When emitting debug_code, set ra as return address for the jump.
+  // 5 instructions: add: 1, pop: 2, jump: 2.
+  const int kOffsetRaInstructions = 5;
+  Label find_ra;
+
+  if (emit_debug_code()) {
+    // Compute ra for the Jump(t9).
+    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+    // This branch-and-link sequence is needed to get the current PC on MIPS,
+    // saved to the ra register, then adjusted for the instruction count.
+    bal(&find_ra);  // bal exposes branch-delay slot.
+    nop();  // Branch delay slot nop.
+    bind(&find_ra);
+    addiu(ra, ra, kOffsetRaBytes);
+  }
+#endif
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  pop(t9);  // 2 instructions: lw, add sp.
+  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
+
+  if (emit_debug_code()) {
+    // Make sure that the expected number of instructions were generated.
+    ASSERT_EQ(kOffsetRaInstructions,
+              InstructionsGeneratedSince(&find_ra));
+  }
+}
+
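Both throw paths walk the handler chain, a singly linked list threaded
through the stack with the link word first (kNextOffset == 0). A standalone
sketch (not part of the patch) of the unwinding loop in ThrowUncatchable:

    #include <cassert>
    enum HandlerState { TRY_CATCH, ENTRY };
    struct StackHandler {
      StackHandler* next;   // First word: the link, as kNextOffset == 0.
      HandlerState state;
    };
    StackHandler* UnwindToEntry(StackHandler* top) {
      while (top->state != ENTRY) top = top->next;  // The loop above.
      return top;
    }
    int main() {
      StackHandler entry = { 0, ENTRY };
      StackHandler inner = { &entry, TRY_CATCH };
      assert(UnwindToEntry(&inner) == &entry);
      return 0;
    }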
+
 void MacroAssembler::AllocateInNewSpace(int object_size,
                                         Register result,
                                         Register scratch1,
@@ -1875,7 +2216,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       li(result, 0x7091);
       li(scratch1, 0x7191);
@@ -1923,7 +2264,7 @@
     lw(result, MemOperand(topaddr));
     lw(t9, MemOperand(topaddr, kPointerSize));
   } else {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Assert that result actually contains top on entry. t9 is used
       // immediately below so this use of t9 does not cause difference with
       // respect to register content between debug and release mode.
@@ -1954,7 +2295,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       li(result, 0x7091);
       li(scratch1, 0x7191);
@@ -1992,7 +2333,7 @@
     lw(result, MemOperand(topaddr));
     lw(t9, MemOperand(topaddr, kPointerSize));
   } else {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Assert that result actually contains top on entry. t9 is used
       // immediately below so this use of t9 does not cause difference with
       // respect to register content between debug and release mode.
@@ -2015,7 +2356,7 @@
   Branch(gc_required, Ugreater, scratch2, Operand(t9));
 
   // Update allocation top. result temporarily holds the new top.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     And(t9, scratch2, Operand(kObjectAlignmentMask));
     Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
   }
@@ -2206,12 +2547,70 @@
 }
 
 
+void MacroAssembler::CopyBytes(Register src,
+                               Register dst,
+                               Register length,
+                               Register scratch) {
+  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+  // Align src before copying in word-size chunks.
+  bind(&align_loop);
+  Branch(&done, eq, length, Operand(zero_reg));
+  bind(&align_loop_1);
+  And(scratch, src, kPointerSize - 1);
+  Branch(&word_loop, eq, scratch, Operand(zero_reg));
+  lbu(scratch, MemOperand(src));
+  Addu(src, src, 1);
+  sb(scratch, MemOperand(dst));
+  Addu(dst, dst, 1);
+  Subu(length, length, Operand(1));
+  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+
+  // Copy bytes in word-size chunks.
+  bind(&word_loop);
+  if (emit_debug_code()) {
+    And(scratch, src, kPointerSize - 1);
+    Assert(eq, "Expecting alignment for CopyBytes",
+        scratch, Operand(zero_reg));
+  }
+  Branch(&byte_loop, lt, length, Operand(kPointerSize));
+  lw(scratch, MemOperand(src));
+  Addu(src, src, kPointerSize);
+
+  // TODO(kalmard): Check if this can be optimized to use sw in most cases.
+  // Can't use unaligned access - copy byte by byte.
+  sb(scratch, MemOperand(dst, 0));
+  srl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 1));
+  srl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 2));
+  srl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 3));
+  Addu(dst, dst, 4);
+
+  Subu(length, length, Operand(kPointerSize));
+  Branch(&word_loop);
+
+  // Copy the last bytes if any left.
+  bind(&byte_loop);
+  Branch(&done, eq, length, Operand(zero_reg));
+  bind(&byte_loop_1);
+  lbu(scratch, MemOperand(src));
+  Addu(src, src, 1);
+  sb(scratch, MemOperand(dst));
+  Addu(dst, dst, 1);
+  Subu(length, length, Operand(1));
+  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+  bind(&done);
+}
+
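The same three-phase strategy, in portable C++ for reference (a standalone
sketch, not part of the patch); byte stores are kept in the word loop
because dst may be unaligned:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    void CopyBytesSketch(const uint8_t* src, uint8_t* dst, size_t length) {
      const size_t kPointerSize = sizeof(uint32_t);
      // Phase 1: single bytes until src is word-aligned.
      while (length > 0 &&
             (reinterpret_cast<uintptr_t>(src) & (kPointerSize - 1)) != 0) {
        *dst++ = *src++;
        length--;
      }
      // Phase 2: word-sized aligned loads, stored back byte by byte.
      while (length >= kPointerSize) {
        uint32_t word;
        memcpy(&word, src, kPointerSize);
        const uint8_t* b = reinterpret_cast<const uint8_t*>(&word);
        for (size_t i = 0; i < kPointerSize; i++) dst[i] = b[i];
        src += kPointerSize;
        dst += kPointerSize;
        length -= kPointerSize;
      }
      // Phase 3: the remaining tail bytes.
      while (length > 0) {
        *dst++ = *src++;
        length--;
      }
    }
    int main() {
      uint8_t src[11], dst[10] = {0};
      memcpy(src, "0123456789", 11);
      CopyBytesSketch(src + 1, dst, 10);  // Deliberately unaligned src.
      assert(memcmp(src + 1, dst, 10) == 0);
      return 0;
    }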
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -2220,12 +2619,27 @@
 }
 
 
+void MacroAssembler::DispatchMap(Register obj,
+                                 Register scratch,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
+  bind(&fail);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Heap::RootListIndex index,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -2234,8 +2648,74 @@
 }
 
 
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+  CpuFeatures::Scope scope(FPU);
+  if (IsMipsSoftFloatABI) {
+    Move(dst, v0, v1);
+  } else {
+    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+  CpuFeatures::Scope scope(FPU);
+  if (!IsMipsSoftFloatABI) {
+    Move(f12, dreg);
+  } else {
+    Move(a0, a1, dreg);
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
+                                             DoubleRegister dreg2) {
+  CpuFeatures::Scope scope(FPU);
+  if (!IsMipsSoftFloatABI) {
+    if (dreg2.is(f12)) {
+      ASSERT(!dreg1.is(f14));
+      Move(f14, dreg2);
+      Move(f12, dreg1);
+    } else {
+      Move(f12, dreg1);
+      Move(f14, dreg2);
+    }
+  } else {
+    Move(a0, a1, dreg1);
+    Move(a2, a3, dreg2);
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+                                             Register reg) {
+  CpuFeatures::Scope scope(FPU);
+  if (!IsMipsSoftFloatABI) {
+    Move(f12, dreg);
+    Move(a2, reg);
+  } else {
+    Move(a2, reg);
+    Move(a0, a1, dreg);
+  }
+}
+
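On the soft-float path a double travels in a pair of GPRs. A standalone
sketch (not part of the patch) of the split, assuming little-endian word
order as on mipsel:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    int main() {
      double d = -2.5;
      uint32_t pair[2];  // pair[0] plays a0/a2, pair[1] plays a1/a3.
      memcpy(pair, &d, sizeof(d));
      double back;
      memcpy(&back, pair, sizeof(back));  // The callee reassembles the pair.
      assert(back == -2.5);
      return 0;
    }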
+
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be t1 to
+  // follow the calling convention, which requires the call type to be
+  // in t1.
+  ASSERT(dst.is(t1));
+  if (call_kind == CALL_AS_FUNCTION) {
+    li(dst, Operand(Smi::FromInt(1)));
+  } else {
+    li(dst, Operand(Smi::FromInt(0)));
+  }
+}
+
+
 // -----------------------------------------------------------------------------
-// JavaScript invokes
+// JavaScript invokes.
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
@@ -2243,7 +2723,8 @@
                                     Register code_reg,
                                     Label* done,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   bool definitely_matches = false;
   Label regular_invoke;
 
@@ -2278,13 +2759,11 @@
         li(a2, Operand(expected.immediate()));
       }
     }
+  } else if (actual.is_immediate()) {
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
+    li(a0, Operand(actual.immediate()));
   } else {
-    if (actual.is_immediate()) {
-      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
-      li(a0, Operand(actual.immediate()));
-    } else {
-      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
-    }
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
   }
 
   if (!definitely_matches) {
@@ -2296,10 +2775,13 @@
     Handle<Code> adaptor =
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+      SetCallKind(t1, call_kind);
       Call(adaptor, RelocInfo::CODE_TARGET);
-      if (post_call_generator != NULL) post_call_generator->Generate();
+      call_wrapper.AfterCall();
       jmp(done);
     } else {
+      SetCallKind(t1, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
     }
     bind(&regular_invoke);
@@ -2311,15 +2793,18 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                PostCallGenerator* post_call_generator) {
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
-                 post_call_generator);
+                 call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
+    SetCallKind(t1, call_kind);
     Call(code);
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(t1, call_kind);
     Jump(code);
   }
   // Continue here if InvokePrologue does handle the invocation due to
@@ -2332,13 +2817,17 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                CallKind call_kind) {
   Label done;
 
-  InvokePrologue(expected, actual, code, no_reg, &done, flag);
+  InvokePrologue(expected, actual, code, no_reg, &done, flag,
+                 NullCallWrapper(), call_kind);
   if (flag == CALL_FUNCTION) {
+    SetCallKind(t1, call_kind);
     Call(code, rmode);
   } else {
+    SetCallKind(t1, call_kind);
     Jump(code, rmode);
   }
   // Continue here if InvokePrologue does handle the invocation due to
@@ -2350,7 +2839,8 @@
 void MacroAssembler::InvokeFunction(Register function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   // Contract with called JS functions requires that function is passed in a1.
   ASSERT(function.is(a1));
   Register expected_reg = a2;
@@ -2365,7 +2855,7 @@
   lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(expected_reg);
-  InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
@@ -2478,7 +2968,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Runtime calls
+// Runtime calls.
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                               Register r1, const Operand& r2) {
@@ -2487,11 +2977,136 @@
 }
 
 
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
+                                         Register r1, const Operand& r2) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+  return result;
+}
+
+
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
+                                             Condition cond,
+                                             Register r1,
+                                             const Operand& r2) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+  return result;
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+  return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+    ExternalReference function, int stack_space) {
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address();
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(),
+      next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(),
+      next_address);
+
+  // Allocate HandleScope in callee-save registers.
+  li(s3, Operand(next_address));
+  lw(s0, MemOperand(s3, kNextOffset));
+  lw(s1, MemOperand(s3, kLimitOffset));
+  lw(s2, MemOperand(s3, kLevelOffset));
+  Addu(s2, s2, Operand(1));
+  sw(s2, MemOperand(s3, kLevelOffset));
+
+  // The O32 ABI requires us to pass a pointer in a0 where the returned struct
+  // (4 bytes) will be placed. This is also built into the Simulator.
+  // Set up the pointer to the returned value (a0). It was allocated in
+  // EnterExitFrame.
+  addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+
+  // The native call returns to the DirectCEntry stub, which redirects to
+  // the return address pushed on the stack (it could have moved after GC).
+  // The DirectCEntry stub itself is generated early and never moves.
+  DirectCEntryStub stub;
+  stub.GenerateCall(this, function);
+
+  // As mentioned above, on MIPS a pointer is returned - we need to dereference
+  // it to get the actual return value (which is also a pointer).
+  lw(v0, MemOperand(v0));
+
+  Label promote_scheduled_exception;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+
+  // If the result is non-zero, dereference it to get the result value;
+  // otherwise set it to undefined.
+  Label skip;
+  LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+  Branch(&skip, eq, v0, Operand(zero_reg));
+  lw(a0, MemOperand(v0));
+  bind(&skip);
+  mov(v0, a0);
+
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  sw(s0, MemOperand(s3, kNextOffset));
+  if (emit_debug_code()) {
+    lw(a1, MemOperand(s3, kLevelOffset));
+    Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
+  }
+  Subu(s2, s2, Operand(1));
+  sw(s2, MemOperand(s3, kLevelOffset));
+  lw(at, MemOperand(s3, kLimitOffset));
+  Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+  // Check if the function scheduled an exception.
+  bind(&leave_exit_frame);
+  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
+  lw(t1, MemOperand(at));
+  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+  li(s0, Operand(stack_space));
+  LeaveExitFrame(false, s0);
+  Ret();
+
+  bind(&promote_scheduled_exception);
+  MaybeObject* result = TryTailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
+  if (result->IsFailure()) {
+    return result;
+  }
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  bind(&delete_allocated_handles);
+  sw(s1, MemOperand(s3, kLimitOffset));
+  mov(s0, v0);
+  mov(a0, v0);
+  PrepareCallCFunction(1, s1);
+  li(a0, Operand(ExternalReference::isolate_address()));
+  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+      1);
+  mov(v0, s0);
+  jmp(&leave_exit_frame);
+
+  return result;
+}
+
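The next/limit/level bookkeeping around the API call can be modelled with
plain integers. A standalone sketch (not part of the patch):

    #include <cassert>
    struct HandleScopeData { int next; int limit; int level; };
    int main() {
      HandleScopeData hs = { 10, 20, 1 };
      // Prologue: remember next and limit, bump the level (s0/s1/s2 above).
      int saved_next = hs.next;
      int saved_limit = hs.limit;
      hs.level++;
      // ... the native callback may allocate handles, moving next ...
      hs.next = 15;
      // Epilogue: restore next, drop the level; a changed limit means the
      // slow path must delete the allocated extensions.
      hs.next = saved_next;
      hs.level--;
      bool delete_extensions = (hs.limit != saved_limit);
      assert(hs.level == 1 && hs.next == 10 && !delete_extensions);
      return 0;
    }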
 
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
@@ -2554,7 +3169,6 @@
 }
 
 
-
 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                             FPURegister value,
                                             Register scratch1) {
@@ -2564,6 +3178,84 @@
 }
 
 
+void MacroAssembler::AdduAndCheckForOverflow(Register dst,
+                                             Register left,
+                                             Register right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  ASSERT(!dst.is(overflow_dst));
+  ASSERT(!dst.is(scratch));
+  ASSERT(!overflow_dst.is(scratch));
+  ASSERT(!overflow_dst.is(left));
+  ASSERT(!overflow_dst.is(right));
+  ASSERT(!left.is(right));
+
+  // TODO(kalmard): There must be a way to optimize the dst == left and
+  // dst == right cases.
+
+  if (dst.is(left)) {
+    addu(overflow_dst, left, right);
+    xor_(dst, overflow_dst, left);
+    xor_(scratch, overflow_dst, right);
+    and_(scratch, scratch, dst);
+    mov(dst, overflow_dst);
+    mov(overflow_dst, scratch);
+  } else if (dst.is(right)) {
+    addu(overflow_dst, left, right);
+    xor_(dst, overflow_dst, right);
+    xor_(scratch, overflow_dst, left);
+    and_(scratch, scratch, dst);
+    mov(dst, overflow_dst);
+    mov(overflow_dst, scratch);
+  } else {
+    addu(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, dst, right);
+    and_(overflow_dst, scratch, overflow_dst);
+  }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst,
+                                             Register left,
+                                             Register right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  ASSERT(!dst.is(overflow_dst));
+  ASSERT(!dst.is(scratch));
+  ASSERT(!overflow_dst.is(scratch));
+  ASSERT(!overflow_dst.is(left));
+  ASSERT(!overflow_dst.is(right));
+  ASSERT(!left.is(right));
+  ASSERT(!scratch.is(left));
+  ASSERT(!scratch.is(right));
+
+  // TODO(kalmard): There must be a way to optimize the dst == left and
+  // dst == right cases.
+
+  if (dst.is(left)) {
+    subu(overflow_dst, left, right);
+    xor_(scratch, overflow_dst, left);
+    xor_(dst, left, right);
+    and_(scratch, scratch, dst);
+    mov(dst, overflow_dst);
+    mov(overflow_dst, scratch);
+  } else if (dst.is(right)) {
+    subu(overflow_dst, left, right);
+    xor_(dst, left, right);
+    xor_(scratch, overflow_dst, left);
+    and_(scratch, scratch, dst);
+    mov(dst, overflow_dst);
+    mov(overflow_dst, scratch);
+  } else {
+    subu(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, right);
+    and_(overflow_dst, scratch, overflow_dst);
+  }
+}
+
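The xor trick used by both helpers is worth spelling out: signed addition
overflows exactly when the operands share a sign and the result's sign
differs; subtraction overflows when the operands' signs differ and the
result's sign differs from the left operand's. A standalone sketch (not
part of the patch):

    #include <cassert>
    #include <cstdint>
    bool AddOverflows(int32_t left, int32_t right) {
      uint32_t res = uint32_t(left) + uint32_t(right);
      // Sign bit of overflow_dst in AdduAndCheckForOverflow.
      return ((res ^ left) & (res ^ right)) >> 31;
    }
    bool SubOverflows(int32_t left, int32_t right) {
      uint32_t res = uint32_t(left) - uint32_t(right);
      // Sign bit of overflow_dst in SubuAndCheckForOverflow.
      return ((res ^ left) & (left ^ right)) >> 31;
    }
    int main() {
      assert(AddOverflows(INT32_MAX, 1) && !AddOverflows(INT32_MAX, -1));
      assert(SubOverflows(INT32_MIN, 1) && !SubOverflows(INT32_MIN, -1));
      return 0;
    }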
+
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // All parameters are on the stack. v0 has the return value after call.
@@ -2623,6 +3315,16 @@
   JumpToExternalReference(ext);
 }
 
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+    const ExternalReference& ext, int num_arguments, int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  li(a0, num_arguments);
+  return TryJumpToExternalReference(ext);
+}
+
 
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
@@ -2640,15 +3342,24 @@
 }
 
 
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+    const ExternalReference& builtin) {
+  li(a1, Operand(builtin));
+  CEntryStub stub(1);
+  return TryTailCallStub(&stub);
+}
+
+
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeJSFlags flags,
-                                   PostCallGenerator* post_call_generator) {
+                                   InvokeFlag flag,
+                                   const CallWrapper& call_wrapper) {
   GetBuiltinEntry(t9, id);
-  if (flags == CALL_JS) {
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(t9));
     Call(t9);
-    if (post_call_generator != NULL) post_call_generator->Generate();
+    call_wrapper.AfterCall();
   } else {
-    ASSERT(flags == JUMP_JS);
+    ASSERT(flag == JUMP_FUNCTION);
     Jump(t9);
   }
 }
@@ -2708,18 +3419,18 @@
 
 
 // -----------------------------------------------------------------------------
-// Debugging
+// Debugging.
 
 void MacroAssembler::Assert(Condition cc, const char* msg,
                             Register rs, Operand rt) {
-  if (FLAG_debug_code)
+  if (emit_debug_code())
     Check(cc, msg, rs, rt);
 }
 
 
 void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                           Heap::RootListIndex index) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     LoadRoot(at, index);
     Check(eq, "Register did not match expected root", reg, Operand(at));
   }
@@ -2727,10 +3438,10 @@
 
 
 void MacroAssembler::AssertFastElements(Register elements) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     ASSERT(!elements.is(at));
     Label ok;
-    Push(elements);
+    push(elements);
     lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
     LoadRoot(at, Heap::kFixedArrayMapRootIndex);
     Branch(&ok, eq, elements, Operand(at));
@@ -2738,7 +3449,7 @@
     Branch(&ok, eq, elements, Operand(at));
     Abort("JSObject with fast elements map has slow elements");
     bind(&ok);
-    Pop(elements);
+    pop(elements);
   }
 }
 
@@ -2748,7 +3459,7 @@
   Label L;
   Branch(&L, cc, rs, rt);
   Abort(msg);
-  // will not return here
+  // Will not return here.
   bind(&L);
 }
 
@@ -2774,11 +3485,11 @@
   AllowStubCallsScope allow_scope(this, true);
 
   li(a0, Operand(p0));
-  Push(a0);
+  push(a0);
   li(a0, Operand(Smi::FromInt(p1 - p0)));
-  Push(a0);
+  push(a0);
   CallRuntime(Runtime::kAbort, 2);
-  // will not return here
+  // Will not return here.
   if (is_trampoline_pool_blocked()) {
     // If the calling code cares about the exact number of
     // instructions generated, we insert padding here to keep the size
@@ -2805,11 +3516,22 @@
       lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
       lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
     }
-    // The context may be an intermediate context, not a function context.
-    lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-  } else {  // Slot is in the current function context.
-    // The context may be an intermediate context, not a function context.
-    lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {
+    // Slot is in the current function context.  Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in cp).
+    Move(dst, cp);
+  }
+
+  // We should not have found a 'with' context by walking the context chain
+  // (i.e., the static scope chain and runtime context chain do not agree).
+  // A variable occurring in such a scope should have slot type LOOKUP and
+  // not CONTEXT.
+  if (emit_debug_code()) {
+    lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+    Check(eq, "Yo dawg, I heard you liked function contexts "
+              "so I put function contexts in all your contexts",
+               dst, Operand(t9));
   }
 }
 
@@ -2830,9 +3552,9 @@
                                                   Register scratch) {
   // Load the initial map. The global functions all have initial maps.
   lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
     Branch(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
@@ -2862,38 +3584,34 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(Register hold_argc,
-                                    Register hold_argv,
-                                    Register hold_function,
-                                    bool save_doubles) {
-  // a0 is argc.
-  sll(t8, a0, kPointerSizeLog2);
-  addu(hold_argv, sp, t8);
-  addiu(hold_argv, hold_argv, -kPointerSize);
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+                                    int stack_space) {
+  // Set up the frame structure on the stack.
+  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
 
-  // Compute callee's stack pointer before making changes and save it as
-  // t9 register so that it is restored as sp register on exit, thereby
-  // popping the args.
-  // t9 = sp + kPointerSize * #args
-  addu(t9, sp, t8);
-
-  // Compute the argv pointer and keep it in a callee-saved register.
-  // This only seems to be needed for crankshaft and may cause problems
-  // so it's disabled for now.
-  // Subu(s6, t9, Operand(kPointerSize));
-
-  // Align the stack at this point.
-  AlignStack(0);
+  // This is how the stack will look:
+  // fp + 2 (==kCallerSPDisplacement) - old stack's end
+  // [fp + 1 (==kCallerPCOffset)] - saved old ra
+  // [fp + 0 (==kCallerFPOffset)] - saved old fp
+  // [fp - 1 (==kSPOffset)] - sp of the called function
+  // [fp - 2 (==kCodeOffset)] - CodeObject
+  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+  //   new stack (will contain saved ra)
 
   // Save registers.
-  addiu(sp, sp, -12);
-  sw(t9, MemOperand(sp, 8));
-  sw(ra, MemOperand(sp, 4));
-  sw(fp, MemOperand(sp, 0));
-  mov(fp, sp);  // Setup new frame pointer.
+  addiu(sp, sp, -4 * kPointerSize);
+  sw(ra, MemOperand(sp, 3 * kPointerSize));
+  sw(fp, MemOperand(sp, 2 * kPointerSize));
+  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
 
-  li(t8, Operand(CodeObject()));
-  Push(t8);  // Accessed from ExitFrame::code_slot.
+  if (emit_debug_code()) {
+    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+  }
+
+  li(t8, Operand(CodeObject()));  // Accessed from ExitFrame::code_slot.
+  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
 
   // Save the frame pointer and the context in top.
   li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
@@ -2901,47 +3619,49 @@
   li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
   sw(cp, MemOperand(t8));
 
-  // Setup argc and the builtin function in callee-saved registers.
-  mov(hold_argc, a0);
-  mov(hold_function, a1);
-
-  // Optionally save all double registers.
+  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   if (save_doubles) {
-#ifdef DEBUG
-    int frame_alignment = ActivationFrameAlignment();
-#endif
-    // The stack alignment code above made sp unaligned, so add space for one
-    // more double register and use aligned addresses.
+    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
     ASSERT(kDoubleSize == frame_alignment);
-    // Mark the frame as containing doubles by pushing a non-valid return
-    // address, i.e. 0.
-    ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
-    push(zero_reg);  // Marker and alignment word.
-    int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
+    if (frame_alignment > 0) {
+      ASSERT(IsPowerOf2(frame_alignment));
+      And(sp, sp, Operand(-frame_alignment));  // Align stack.
+    }
+    int space = FPURegister::kNumRegisters * kDoubleSize;
     Subu(sp, sp, Operand(space));
     // Remember: we only need to save every 2nd double FPU value.
     for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
       FPURegister reg = FPURegister::from_code(i);
-      sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
+      sdc1(reg, MemOperand(sp, i * kDoubleSize));
     }
-    // Note that f0 will be accessible at fp - 2*kPointerSize -
-    // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
-    // alignment word were pushed after the fp.
   }
+
+  // Reserve space for the return address, the requested stack space and an
+  // optional slot (used by the DirectCEntryStub to hold the return value if
+  // a struct is returned), and align the frame before calling the runtime
+  // function.
+  ASSERT(stack_space >= 0);
+  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
+  if (frame_alignment > 0) {
+    ASSERT(IsPowerOf2(frame_alignment));
+    And(sp, sp, Operand(-frame_alignment));  // Align stack.
+  }
+
+  // Set the exit frame sp value to point just before the return address
+  // location.
+  addiu(at, sp, kPointerSize);
+  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
 }
 
 
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles,
+                                    Register argument_count) {
   // Optionally restore all double registers.
   if (save_doubles) {
-    // TODO(regis): Use vldrm instruction.
     // Remember: we only need to restore every 2nd double FPU value.
+    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
     for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
       FPURegister reg = FPURegister::from_code(i);
-      // Register f30-f31 is just below the marker.
-      const int offset = ExitFrameConstants::kMarkerOffset;
-      ldc1(reg, MemOperand(fp,
-          (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
+      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
     }
   }
 
@@ -2958,11 +3678,13 @@
 
   // Pop the arguments, restore registers, and return.
   mov(sp, fp);  // Respect ABI stack constraint.
-  lw(fp, MemOperand(sp, 0));
-  lw(ra, MemOperand(sp, 4));
-  lw(sp, MemOperand(sp, 8));
-  jr(ra);
-  nop();  // Branch delay slot nop.
+  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+  addiu(sp, sp, 8);
+  if (argument_count.is_valid()) {
+    sll(t8, argument_count, kPointerSizeLog2);
+    addu(sp, sp, t8);
+  }
 }
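
For orientation, a minimal sketch of how the new exit-frame interface pairs up at a call site (hypothetical caller; the choice of a0 for the argument count and the extra stack slot are illustrative, not mandated by this patch):

void GenerateRuntimeTrampoline(MacroAssembler* masm) {
  // One extra slot for the callee; FPU saves are currently disabled anyway.
  masm->EnterExitFrame(false, 1);
  // ... make the C call here; sp is now aligned and
  // [fp + ExitFrameConstants::kSPOffset] points just below the return address.
  masm->LeaveExitFrame(false, a0);  // Also drops a0 * kPointerSize arguments.
  masm->Ret();
}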
 
 
@@ -2996,39 +3718,24 @@
 #endif  // defined(V8_HOST_ARCH_MIPS)
 }
 
+void MacroAssembler::AssertStackIsAligned() {
+  if (emit_debug_code()) {
+    const int frame_alignment = ActivationFrameAlignment();
+    const int frame_alignment_mask = frame_alignment - 1;
 
-void MacroAssembler::AlignStack(int offset) {
-  // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
-  //     and an offset of 1 aligns to 4 modulo 8 bytes.
-#if defined(V8_HOST_ARCH_MIPS)
-  // Running on the real platform. Use the alignment as mandated by the local
-  // environment.
-  // Note: This will break if we ever start generating snapshots on one MIPS
-  // platform for another MIPS platform with a different alignment.
-  int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else  // defined(V8_HOST_ARCH_MIPS)
-  // If we are using the simulator then we should always align to the expected
-  // alignment. As the simulator is used to generate snapshots we do not know
-  // if the target platform will need alignment, so we will always align at
-  // this point here.
-  int activation_frame_alignment = 2 * kPointerSize;
-#endif  // defined(V8_HOST_ARCH_MIPS)
-  if (activation_frame_alignment != kPointerSize) {
-    // This code needs to be made more general if this assert doesn't hold.
-    ASSERT(activation_frame_alignment == 2 * kPointerSize);
-    if (offset == 0) {
-      andi(t8, sp, activation_frame_alignment - 1);
-      Push(zero_reg, eq, t8, zero_reg);
-    } else {
-      andi(t8, sp, activation_frame_alignment - 1);
-      addiu(t8, t8, -4);
-      Push(zero_reg, eq, t8, zero_reg);
-    }
-  }
+    if (frame_alignment > kPointerSize) {
+      Label alignment_as_expected;
+      ASSERT(IsPowerOf2(frame_alignment));
+      andi(at, sp, frame_alignment_mask);
+      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+      // Don't use Check here, as it will call Runtime_Abort re-entering here.
+      stop("Unexpected stack alignment");
+      bind(&alignment_as_expected);
+    }
+  }
 }
 
 
-
 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
     Register reg,
     Register scratch,
@@ -3078,6 +3785,18 @@
 }
 
 
+void MacroAssembler::AbortIfNotString(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  And(t0, object, Operand(kSmiTagMask));
+  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
+  push(object);
+  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+  pop(object);
+}
+
+
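
In plain C terms, the predicate AbortIfNotString enforces is roughly the following sketch (assuming the usual V8 tagging scheme, where kSmiTag == 0 and string instance types sort below FIRST_NONSTRING_TYPE):

// Illustrative only: the check performed by AbortIfNotString.
static bool IsStringObject(uint32_t tagged_word, uint8_t instance_type) {
  if ((tagged_word & kSmiTagMask) == 0) return false;  // A smi is not a string.
  return instance_type < FIRST_NONSTRING_TYPE;  // String types sort first.
}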
 void MacroAssembler::AbortIfNotRootValue(Register src,
                                          Heap::RootListIndex root_value_index,
                                          const char* message) {
@@ -3169,9 +3888,6 @@
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
   // Up to four simple arguments are passed in registers a0..a3.
   // Those four arguments must have reserved argument slots on the stack for
   // mips, even though those argument slots are not normally used.
@@ -3198,7 +3914,7 @@
 
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(no_reg, function, at, num_arguments);
+  CallCFunctionHelper(no_reg, function, t8, num_arguments);
 }
 
 
@@ -3216,21 +3932,6 @@
                                          ExternalReference function_reference,
                                          Register scratch,
                                          int num_arguments) {
-  // Push Isolate address as the last argument.
-  if (num_arguments < kRegisterPassedArguments) {
-    Register arg_to_reg[] = {a0, a1, a2, a3};
-    Register r = arg_to_reg[num_arguments];
-    li(r, Operand(ExternalReference::isolate_address()));
-  } else {
-    int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
-                                 (StandardFrameConstants::kCArgsSlotsSize /
-                                  kPointerSize);
-    // Push Isolate address on the stack after the arguments.
-    li(scratch, Operand(ExternalReference::isolate_address()));
-    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
-  }
-  num_arguments += 1;
-
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -3257,13 +3958,12 @@
   // Just call directly. The function called cannot cause a GC, or
   // allow preemption, so the return address in the link register
   // stays correct.
-  if (!function.is(t9)) {
-    mov(t9, function);
-    function = t9;
-  }
 
   if (function.is(no_reg)) {
-    li(t9, Operand(function_reference));
+    function = t9;
+    li(function, Operand(function_reference));
+  } else if (!function.is(t9)) {
+    mov(t9, function);
     function = t9;
   }
 
@@ -3286,12 +3986,22 @@
 #undef BRANCH_ARGS_CHECK
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  lw(descriptors,
+     FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi);
+  li(descriptors, Operand(FACTORY->empty_descriptor_array()));
+  bind(&not_smi);
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(address, size_ + Assembler::kGap) {
+      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
@@ -3309,8 +4019,8 @@
 }
 
 
-void CodePatcher::Emit(Instr x) {
-  masm()->emit(x);
+void CodePatcher::Emit(Instr instr) {
+  masm()->emit(instr);
 }
 
 
@@ -3319,7 +4029,26 @@
 }
 
 
-#endif  // ENABLE_DEBUGGER_SUPPORT
+void CodePatcher::ChangeBranchCondition(Condition cond) {
+  Instr instr = Assembler::instr_at(masm_.pc_);
+  ASSERT(Assembler::IsBranch(instr));
+  uint32_t opcode = Assembler::GetOpcodeField(instr);
+  // Currently only the 'eq' and 'ne' cond values are supported, and only for
+  // the simple branch instructions (those whose opcode field encodes the
+  // branch type). There are some special cases (see Assembler::IsBranch())
+  // so extending this would be tricky.
+  ASSERT(opcode == BEQ ||
+         opcode == BNE ||
+         opcode == BLEZ ||
+         opcode == BGTZ ||
+         opcode == BEQL ||
+         opcode == BNEL ||
+         opcode == BLEZL ||
+         opcode == BGTZL);
+  opcode = (cond == eq) ? BEQ : BNE;
+  instr = (instr & ~kOpcodeMask) | opcode;
+  masm_.emit(instr);
+}
 
 
 } }  // namespace v8::internal
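
With CodePatcher no longer fenced behind ENABLE_DEBUGGER_SUPPORT, a call site for the new ChangeBranchCondition helper might look like this sketch (hypothetical function; assumes the patched address holds a simple beq/bne):

void InvertBranchAt(byte* branch_address) {
  CodePatcher patcher(branch_address, 1);  // Patch exactly one instruction.
  patcher.ChangeBranchCondition(ne);       // Rewrite the opcode field to BNE.
}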
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 7ff9e17..bcb459e 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,13 +30,13 @@
 
 #include "assembler.h"
 #include "mips/assembler-mips.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declaration.
 class JumpTarget;
-class PostCallGenerator;
 
 // Reserved Register Usage Summary.
 //
@@ -53,17 +53,12 @@
 // Registers aliases
 // cp is assumed to be a callee saved register.
 const Register roots = s6;  // Roots array pointer.
-const Register cp = s7;     // JavaScript context pointer
-const Register fp = s8_fp;  // Alias fp
-// Register used for condition evaluation.
+const Register cp = s7;     // JavaScript context pointer.
+const Register fp = s8_fp;  // Alias for fp.
+// Registers used for condition evaluation.
 const Register condReg1 = s4;
 const Register condReg2 = s5;
 
-enum InvokeJSFlags {
-  CALL_JS,
-  JUMP_JS
-};
-
 
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
@@ -98,15 +93,19 @@
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller never to invoke such a function on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
-// Arguments macros
+// Arguments macros.
 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
 #define COND_ARGS cond, r1, r2
 
-// ** Prototypes
+// Prototypes.
 
-// * Prototypes for functions with no target (eg Ret()).
+// Prototypes for functions with no target (e.g. Ret()).
 #define DECLARE_NOTARGET_PROTOTYPE(Name) \
   void Name(BranchDelaySlot bd = PROTECT); \
   void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
@@ -114,7 +113,7 @@
     Name(COND_ARGS, bd); \
   }
 
-// * Prototypes for functions with a target.
+// Prototypes for functions with a target.
 
 // Cases when relocation may be needed.
 #define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
@@ -152,7 +151,7 @@
     Name(target, COND_ARGS, bd); \
   }
 
-// ** Target prototypes.
+// Target prototypes.
 
 #define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
   DECLARE_NORELOC_PROTOTYPE(Name, Register) \
@@ -181,6 +180,16 @@
 #undef DECLARE_JUMP_CALL_PROTOTYPES
 #undef DECLARE_BRANCH_PROTOTYPES
 
+  void CallWithAstId(Handle<Code> code,
+                     RelocInfo::Mode rmode,
+                     unsigned ast_id,
+                     Condition cond = al,
+                     Register r1 = zero_reg,
+                     const Operand& r2 = Operand(zero_reg));
+
+  int CallSize(Register reg);
+  int CallSize(Handle<Code> code, RelocInfo::Mode rmode);
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count,
@@ -198,9 +207,28 @@
   void Swap(Register reg1, Register reg2, Register scratch = no_reg);
 
   void Call(Label* target);
-  // May do nothing if the registers are identical.
-  void Move(Register dst, Register src);
 
+  inline void Move(Register dst, Register src) {
+    if (!dst.is(src)) {
+      mov(dst, src);
+    }
+  }
+
+  inline void Move(FPURegister dst, FPURegister src) {
+    if (!dst.is(src)) {
+      mov_d(dst, src);
+    }
+  }
+
+  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
+    mfc1(dst_low, src);
+    mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+  }
+
+  inline void Move(FPURegister dst, Register src_low, Register src_high) {
+    mtc1(src_low, dst);
+    mtc1(src_high, FPURegister::from_code(dst.code() + 1));
+  }
 
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it is used by v8, for example
@@ -262,7 +290,7 @@
 
 
   // ---------------------------------------------------------------------------
-  // Inline caching support
+  // Inline caching support.
 
   // Generate code for checking access rights - used for security checks
   // on access to global objects across environments. The holder register
@@ -306,7 +334,7 @@
 
 
   // ---------------------------------------------------------------------------
-  // Allocation support
+  // Allocation support.
 
   // Allocate an object in new space. The object_size is specified
   // either in bytes or in words if the allocation flag SIZE_IN_WORDS
@@ -373,7 +401,7 @@
                                    Label* gc_required);
 
   // ---------------------------------------------------------------------------
-  // Instruction macros
+  // Instruction macros.
 
 #define DEFINE_INSTRUCTION(instr)                                              \
   void instr(Register rd, Register rs, const Operand& rt);                     \
@@ -405,6 +433,7 @@
   DEFINE_INSTRUCTION(Or);
   DEFINE_INSTRUCTION(Xor);
   DEFINE_INSTRUCTION(Nor);
+  DEFINE_INSTRUCTION2(Neg);
 
   DEFINE_INSTRUCTION(Slt);
   DEFINE_INSTRUCTION(Sltu);
@@ -416,12 +445,12 @@
 #undef DEFINE_INSTRUCTION2
 
 
-  //------------Pseudo-instructions-------------
+  // ---------------------------------------------------------------------------
+  // Pseudo-instructions.
 
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
-
-  // load int32 in the rd register
+  // Load int32 in the rd register.
   void li(Register rd, Operand j, bool gen2instr = false);
   inline void li(Register rd, int32_t j, bool gen2instr = false) {
     li(rd, Operand(j), gen2instr);
@@ -430,100 +459,88 @@
     li(dst, Operand(value), gen2instr);
   }
 
-  // Exception-generating instructions and debugging support
+  // Exception-generating instructions and debugging support.
   void stop(const char* msg);
 
-
   // Push multiple registers on the stack.
   // Registers are saved in numerical order, with higher numbered registers
-  // saved in higher memory addresses
+  // saved in higher memory addresses.
   void MultiPush(RegList regs);
   void MultiPushReversed(RegList regs);
 
-  void Push(Register src) {
+  // Lower case push() for compatibility with arch-independent code.
+  void push(Register src) {
     Addu(sp, sp, Operand(-kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
 
-  // Push two registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2, Condition cond = al) {
-    ASSERT(cond == al);  // Do not support conditional versions yet.
+  // Push two registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2) {
     Subu(sp, sp, Operand(2 * kPointerSize));
     sw(src1, MemOperand(sp, 1 * kPointerSize));
     sw(src2, MemOperand(sp, 0 * kPointerSize));
   }
 
-  // Push three registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
-    ASSERT(cond == al);  // Do not support conditional versions yet.
-    Addu(sp, sp, Operand(3 * -kPointerSize));
+  // Push three registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3) {
+    Subu(sp, sp, Operand(3 * kPointerSize));
     sw(src1, MemOperand(sp, 2 * kPointerSize));
     sw(src2, MemOperand(sp, 1 * kPointerSize));
     sw(src3, MemOperand(sp, 0 * kPointerSize));
   }
 
-  // Push four registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2,
-            Register src3, Register src4, Condition cond = al) {
-    ASSERT(cond == al);  // Do not support conditional versions yet.
-    Addu(sp, sp, Operand(4 * -kPointerSize));
+  // Push four registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Register src4) {
+    Subu(sp, sp, Operand(4 * kPointerSize));
     sw(src1, MemOperand(sp, 3 * kPointerSize));
     sw(src2, MemOperand(sp, 2 * kPointerSize));
     sw(src3, MemOperand(sp, 1 * kPointerSize));
     sw(src4, MemOperand(sp, 0 * kPointerSize));
   }
 
-  inline void push(Register src) { Push(src); }
-  inline void pop(Register src) { Pop(src); }
-
   void Push(Register src, Condition cond, Register tst1, Register tst2) {
-    // Since we don't have conditionnal execution we use a Branch.
+    // Since we don't have conditional execution we use a Branch.
     Branch(3, cond, tst1, Operand(tst2));
-    Addu(sp, sp, Operand(-kPointerSize));
+    Subu(sp, sp, Operand(kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
 
-
   // Pops multiple values from the stack and load them in the
   // registers specified in regs. Pop order is the opposite as in MultiPush.
   void MultiPop(RegList regs);
   void MultiPopReversed(RegList regs);
-  void Pop(Register dst) {
+
+  // Lower case pop() for compatibility with arch-independent code.
+  void pop(Register dst) {
     lw(dst, MemOperand(sp, 0));
     Addu(sp, sp, Operand(kPointerSize));
   }
+
+  // Pop two registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2) {
+    ASSERT(!src1.is(src2));
+    lw(src2, MemOperand(sp, 0 * kPointerSize));
+    lw(src1, MemOperand(sp, 1 * kPointerSize));
+    Addu(sp, sp, 2 * kPointerSize);
+  }
+
   void Pop(uint32_t count = 1) {
     Addu(sp, sp, Operand(count * kPointerSize));
   }
 
-  // ---------------------------------------------------------------------------
-  // These functions are only used by crankshaft, so they are currently
-  // unimplemented.
-
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
-  void PushSafepointRegisters() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  void PopSafepointRegisters() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  void PushSafepointRegistersAndDoubles() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  void PopSafepointRegistersAndDoubles() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  static int SafepointRegisterStackIndex(int reg_code) {
-    UNIMPLEMENTED_MIPS();
-    return 0;
-  }
-
-  // ---------------------------------------------------------------------------
+  void PushSafepointRegisters();
+  void PopSafepointRegisters();
+  void PushSafepointRegistersAndDoubles();
+  void PopSafepointRegistersAndDoubles();
+  // Store value in register src in the safepoint stack slot for
+  // register dst.
+  void StoreToSafepointRegisterSlot(Register src, Register dst);
+  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+  // Load the value of the src register from its safepoint stack slot
+  // into register dst.
+  void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
   // MIPS32 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -548,8 +565,19 @@
                       FPURegister double_scratch,
                       Label *not_int32);
 
+  // Helper for EmitECMATruncate.
+  // This will truncate a floating-point value outside of the signed 32-bit
+  // integer range to a 32-bit signed integer.
+  // Expects the double value loaded in input_high and input_low.
+  // Exits with the answer in 'result'.
+  // Note that this code does not work for values in the 32-bit range!
+  void EmitOutOfInt32RangeTruncate(Register result,
+                                   Register input_high,
+                                   Register input_low,
+                                   Register scratch);
+
   // -------------------------------------------------------------------------
-  // Activation frames
+  // Activation frames.
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
   void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
@@ -558,23 +586,21 @@
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
   // Enter exit frame.
-  // Expects the number of arguments in register a0 and
-  // the builtin function to call in register a1.
-  // On output hold_argc, hold_function, and hold_argv are setup.
-  void EnterExitFrame(Register hold_argc,
-                      Register hold_argv,
-                      Register hold_function,
-                      bool save_doubles);
+  // save_doubles - saves FPU registers on the stack, currently disabled.
+  // stack_space - extra stack space.
+  void EnterExitFrame(bool save_doubles,
+                      int stack_space = 0);
 
-  // Leave the current exit frame. Expects the return value in v0.
-  void LeaveExitFrame(bool save_doubles);
-
-  // Align the stack by optionally pushing a Smi zero.
-  void AlignStack(int offset);    // TODO(mips) : remove this function.
+  // Leave the current exit frame. arg_count is the number of arguments
+  // to drop from the stack in addition to the frame itself.
+  void LeaveExitFrame(bool save_doubles, Register arg_count);
 
   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
 
+  // Make sure the stack is aligned. Only emits code in debug mode.
+  void AssertStackIsAligned();
+
   void LoadContext(Register dst, int context_chain_length);
 
   void LoadGlobalFunction(int index, Register function);
@@ -586,27 +612,35 @@
                                     Register scratch);
 
   // -------------------------------------------------------------------------
-  // JavaScript invokes
+  // JavaScript invokes.
+
+  // Set up call kind marking in t1. The method takes t1 as an
+  // explicit first parameter to make the code more readable at the
+  // call sites.
+  void SetCallKind(Register dst, CallKind kind);
 
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  PostCallGenerator* post_call_generator = NULL);
+                  const CallWrapper& call_wrapper = NullCallWrapper(),
+                  CallKind call_kind = CALL_AS_METHOD);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  CallKind call_kind = CALL_AS_METHOD);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper = NullCallWrapper(),
+                      CallKind call_kind = CALL_AS_METHOD);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
@@ -628,14 +662,14 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // -------------------------------------------------------------------------
-  // Debugger Support
+  // Debugger Support.
 
   void DebugBreak();
 #endif
 
 
   // -------------------------------------------------------------------------
-  // Exception handling
+  // Exception handling.
 
   // Push a new try handler and link into try handler chain.
   // The return address must be passed in register ra.
@@ -646,9 +680,24 @@
   // Must preserve the result register.
   void PopTryHandler();
 
+  // Passes thrown value (in v0) to the handler at the top of the try
+  // handler chain.
+  void Throw(Register value);
+
+  // Propagates an uncatchable exception to the top of the current JS stack's
+  // handler chain.
+  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
   // Copies a fixed number of fields of heap objects from src to dst.
   void CopyFields(Register dst, Register src, RegList temps, int field_count);
 
+  // Copies a number of bytes from src to dst. All registers are clobbered. On
+  // exit src and dst will point to the place just after where the last byte was
+  // read or written and length will be zero.
+  void CopyBytes(Register src,
+                 Register dst,
+                 Register length,
+                 Register scratch);
+
   // -------------------------------------------------------------------------
   // Support functions.
 
@@ -669,18 +718,27 @@
   // Check if the map of an object is equal to a specified map (either
   // given directly or as an index into the root list) and branch to
   // label if not. Skip the smi check if not required (object is known
-  // to be a heap object)
+  // to be a heap object).
   void CheckMap(Register obj,
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
 
   void CheckMap(Register obj,
                 Register scratch,
                 Heap::RootListIndex index,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object).
+  void DispatchMap(Register obj,
+                   Register scratch,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
@@ -692,6 +750,10 @@
   //   index - holds the overwritten index on exit.
   void IndexFromHash(Register hash, Register index);
 
+  // Get the number of least significant bits from a register.
+  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
   // Load the value of a number object into a FPU double register. If the
   // object is not a number a jump to the label not_number is performed
   // and the FPU double register is unchanged.
@@ -712,15 +774,70 @@
                               Register scratch1);
 
   // -------------------------------------------------------------------------
-  // Runtime calls
+  // Overflow handling functions.
+  // Usage: first call the appropriate arithmetic function, then call one of the
+  // jump functions with the overflow_dst register as the second parameter.
+
+  void AdduAndCheckForOverflow(Register dst,
+                               Register left,
+                               Register right,
+                               Register overflow_dst,
+                               Register scratch = at);
+
+  void SubuAndCheckForOverflow(Register dst,
+                               Register left,
+                               Register right,
+                               Register overflow_dst,
+                               Register scratch = at);
+
+  void BranchOnOverflow(Label* label,
+                        Register overflow_check,
+                        BranchDelaySlot bd = PROTECT) {
+    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void BranchOnNoOverflow(Label* label,
+                          Register overflow_check,
+                          BranchDelaySlot bd = PROTECT) {
+    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+    Ret(lt, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+    Ret(ge, overflow_check, Operand(zero_reg), bd);
+  }
+
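+
+  // Per the usage note above, a minimal sketch of the overflow protocol
+  // (registers chosen for illustration only):
+  //
+  //   Label overflow;
+  //   // After the add, t9 is negative iff the signed addition wrapped.
+  //   AdduAndCheckForOverflow(v0, a0, a1, t9);
+  //   BranchOnOverflow(&overflow, t9);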
+  // -------------------------------------------------------------------------
+  // Runtime calls.
 
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = cc_always,
                 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
 
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
+                                           Condition cond = cc_always,
+                                           Register r1 = zero_reg,
+                                           const Operand& r2 =
+                                               Operand(zero_reg));
+
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
+  // Tail call a code stub (jump) and return the code object called.  Try to
+  // generate the code if necessary.  Do not perform a GC but instead return
+  // a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+                                               Condition cond = cc_always,
+                                               Register r1 = zero_reg,
+                                               const Operand& r2 =
+                                                   Operand(zero_reg));
+
   void CallJSExitStub(CodeStub* stub);
 
   // Call a runtime routine.
@@ -741,6 +858,12 @@
                                  int num_arguments,
                                  int result_size);
 
+  // Tail call of a runtime routine (jump). Try to generate the code if
+  // necessary. Do not perform a GC but instead return a retry after GC
+  // failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+      const ExternalReference& ext, int num_arguments, int result_size);
+
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
@@ -768,15 +891,31 @@
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, Register scratch, int num_arguments);
+  void GetCFunctionDoubleResult(const DoubleRegister dst);
+
+  // There are two ways of passing double arguments on MIPS, depending on
+  // whether soft or hard floating point ABI is used. These functions
+  // abstract parameter passing for the three different ways we call
+  // C functions from generated code.
+  void SetCallCDoubleArguments(DoubleRegister dreg);
+  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
+  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+
+  // Calls an API function. Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions. Restores context.
+  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
+                                           int stack_space);
 
   // Jump to the builtin routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
+  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flags,
-                     PostCallGenerator* post_call_generator = NULL);
+                     InvokeFlag flag,
+                     const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the code object for the given builtin in the target register and
   // set up the function in a1.
@@ -787,14 +926,17 @@
 
   struct Unresolved {
     int pc;
-    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
     const char* name;
   };
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
   // -------------------------------------------------------------------------
-  // StatsCounter support
+  // StatsCounter support.
 
   void SetCounter(StatsCounter* counter, int value,
                   Register scratch1, Register scratch2);
@@ -805,7 +947,7 @@
 
 
   // -------------------------------------------------------------------------
-  // Debugging
+  // Debugging.
 
   // Calls Abort(msg) if the condition cc is not satisfied.
   // Use --debug_code to enable.
@@ -826,7 +968,7 @@
   bool allow_stub_calls() { return allow_stub_calls_; }
 
   // ---------------------------------------------------------------------------
-  // Number utilities
+  // Number utilities.
 
   // Check whether the value of reg is a power of two and not zero. If not
   // control continues at the label not_power_of_two. If reg is a power of two
@@ -837,7 +979,7 @@
                                  Label* not_power_of_two_or_zero);
 
   // -------------------------------------------------------------------------
-  // Smi utilities
+  // Smi utilities.
 
   // Try to convert int32 to smi. If the value is too large, preserve
   // the original value and jump to not_a_smi. Destroys scratch and
@@ -888,13 +1030,16 @@
   void AbortIfSmi(Register object);
   void AbortIfNotSmi(Register object);
 
+  // Abort execution if argument is not a string. Used in debug code.
+  void AbortIfNotString(Register object);
+
   // Abort execution if argument is not the root value with the given index.
   void AbortIfNotRootValue(Register src,
                            Heap::RootListIndex root_value_index,
                            const char* message);
 
   // ---------------------------------------------------------------------------
-  // HeapNumber utilities
+  // HeapNumber utilities.
 
   void JumpIfNotHeapNumber(Register object,
                            Register heap_number_map,
@@ -902,7 +1047,7 @@
                            Label* on_not_heap_number);
 
   // -------------------------------------------------------------------------
-  // String utilities
+  // String utilities.
 
   // Checks if both instance types are sequential ASCII strings and jumps to
   // label if either is not.
@@ -935,6 +1080,8 @@
                                            Register scratch2,
                                            Label* failure);
 
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+
  private:
   void CallCFunctionHelper(Register function,
                            ExternalReference function_reference,
@@ -959,7 +1106,8 @@
                       Register code_reg,
                       Label* done,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper = NullCallWrapper(),
+                      CallKind call_kind = CALL_AS_METHOD);
 
   // Get the code for the given builtin. Sets the 'resolved' flag to
   // indicate whether the function could be resolved.
@@ -975,15 +1123,22 @@
                            Register scratch1,
                            Register scratch2);
 
+  // Compute memory operands for safepoint stack slots.
+  static int SafepointRegisterStackIndex(int reg_code);
+  MemOperand SafepointRegisterSlot(Register reg);
+  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
 
   bool generating_stub_;
   bool allow_stub_calls_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
+
+  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // traversal.
+  friend class OptimizedFrame;
 };
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -998,29 +1153,21 @@
   MacroAssembler* masm() { return &masm_; }
 
   // Emit an instruction directly.
-  void Emit(Instr x);
+  void Emit(Instr instr);
 
   // Emit an address directly.
   void Emit(Address addr);
 
+  // Change the condition part of an instruction, leaving the rest of the
+  // current instruction unchanged.
+  void ChangeBranchCondition(Condition cond);
+
  private:
   byte* address_;  // The address of the code being patched.
   int instructions_;  // Number of instructions of the expected patch size.
   int size_;  // Number of bytes of the expected patch size.
   MacroAssembler masm_;  // Macro assembler used to generate the code.
 };
-#endif  // ENABLE_DEBUGGER_SUPPORT
-
-
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
-  PostCallGenerator() { }
-  virtual ~PostCallGenerator() { }
-  virtual void Generate() = 0;
-};
 
 
 // -----------------------------------------------------------------------------
@@ -1042,6 +1189,16 @@
 }
 
 
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+  ASSERT(index > StandardFrameConstants::kCArgSlotCount);
+  // Argument 5 takes the slot just past the four Arg-slots.
+  int offset =
+      (index - 5) * kPointerSize + StandardFrameConstants::kCArgsSlotsSize;
+  return MemOperand(sp, offset);
+}
+
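
A worked example of the offset computation, assuming 4-byte pointers and the four reserved o32 argument slots (kCArgsSlotsSize == 16):

// CFunctionArgumentOperand(5): (5 - 5) * 4 + 16 == 16  ->  MemOperand(sp, 16).
// CFunctionArgumentOperand(6): (6 - 5) * 4 + 16 == 20  ->  MemOperand(sp, 20).
__ sw(t0, CFunctionArgumentOperand(5));  // Store the fifth C argument.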
 
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
@@ -1055,4 +1212,3 @@
 } }  // namespace v8::internal
 
 #endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-
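
The ordering contract of the multi-register Push/Pop helpers declared above can be illustrated with a short equivalence sketch (illustrative, not part of this patch):

// Push(a0, a1) leaves a0 at the higher address, i.e. it is equivalent to:
__ push(a0);
__ push(a1);
// ...and Pop(a0, a1) undoes it, reading a1 from the lower address first.
__ Pop(a0, a1);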
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 9f9e976..cfc8f65 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -56,49 +56,58 @@
  * - sp : points to tip of C stack.
  *
  * The remaining registers are free for computations.
- *
  * Each call to a public method should retain this convention.
+ *
  * The stack will have the following structure:
- *       - direct_call        (if 1, direct call from JavaScript code, if 0 call
- *                             through the runtime system)
- *       - stack_area_base    (High end of the memory area to use as
- *                             backtracking stack)
- *       - int* capture_array (int[num_saved_registers_], for output).
- *       - stack frame header (16 bytes in size)
- *       --- sp when called ---
- *       - link address
- *       - backup of registers s0..s7
- *       - end of input       (Address of end of string)
- *       - start of input     (Address of first character in string)
- *       - start index        (character index of start)
- *       --- frame pointer ----
- *       - void* input_string (location of a handle containing the string)
- *       - Offset of location before start of input (effectively character
- *         position -1). Used to initialize capture registers to a non-position.
- *       - At start (if 1, we are starting at the start of the
- *         string, otherwise 0)
- *       - register 0         (Only positions must be stored in the first
- *       - register 1          num_saved_registers_ registers)
- *       - ...
- *       - register num_registers-1
- *       --- sp ---
+ *
+ *  - fp[56]  direct_call  (if 1, direct call from JavaScript code,
+ *                          if 0, call through the runtime system).
+ *  - fp[52]  stack_area_base (High end of the memory area to use as
+ *                             backtracking stack).
+ *  - fp[48]  int* capture_array (int[num_saved_registers_], for output).
+ *  - fp[44]  secondary link/return address used by native call.
+ *  --- sp when called ---
+ *  - fp[40]  return address (ra).
+ *  - fp[36]  old frame pointer (s8_fp).
+ *  - fp[0..32]  backup of registers s0..s7.
+ *  --- frame pointer ----
+ *  - fp[-4]  end of input       (Address of end of string).
+ *  - fp[-8]  start of input     (Address of first character in string).
+ *  - fp[-12] start index        (character index of start).
+ *  - fp[-16] void* input_string (location of a handle containing the string).
+ *  - fp[-20] Offset of location before start of input (effectively character
+ *            position -1). Used to initialize capture registers to a
+ *            non-position.
+ *  - fp[-24] At start (if 1, we are starting at the start of the
+ *            string, otherwise 0).
+ *  - fp[-28] register 0         (Only positions must be stored in the first
+ *  -         register 1          num_saved_registers_ registers)
+ *  -         ...
+ *  -         register num_registers-1
+ *  --- sp ---
  *
  * The first num_saved_registers_ registers are initialized to point to
  * "character -1" in the string (i.e., char_size() bytes before the first
  * character of the string). The remaining registers start out as garbage.
  *
  * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
  * int (*match)(String* input_string,
  *              int start_index,
  *              Address start,
  *              Address end,
+ *              Address secondary_return_address,  // Only used by native call.
  *              int* capture_output_array,
- *              bool at_start,
  *              byte* stack_area_base,
- *              bool direct_call)
+ *              bool direct_call = false)
  * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc).
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in mips/simulator-mips.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the ra register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
  */
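
A sketch of a non-direct invocation through this entry point, following the signature above (the variable names are hypothetical; in practice NativeRegExpMacroAssembler::Execute() and the CALL_GENERATED_REGEXP_CODE macro in mips/simulator-mips.h set this up):

typedef int (*RegExpEntry)(String* input_string, int start_index,
                           Address start, Address end,
                           Address secondary_return_address,
                           int* capture_output_array,
                           byte* stack_area_base, bool direct_call);
int result = reinterpret_cast<RegExpEntry>(code_entry)(
    subject, 0, input_start, input_end, return_address_slot,
    captures, stack_base, false);  // false: call through the runtime system.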
 
 #define __ ACCESS_MASM(masm_)
@@ -106,7 +115,7 @@
 RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
     Mode mode,
     int registers_to_save)
-    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+    : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
       mode_(mode),
       num_registers_(registers_to_save),
       num_saved_registers_(registers_to_save),
@@ -114,9 +123,15 @@
       start_label_(),
       success_label_(),
       backtrack_label_(),
-      exit_label_() {
+      exit_label_(),
+      internal_failure_label_() {
   ASSERT_EQ(0, registers_to_save % 2);
   __ jmp(&entry_label_);   // We'll write the entry code later.
+  // If the code gets too big or corrupted, an internal exception will be
+  // raised, and we will exit right away.
+  __ bind(&internal_failure_label_);
+  __ li(v0, Operand(FAILURE));
+  __ Ret();
   __ bind(&start_label_);  // And then continue from here.
 }
 
@@ -131,6 +146,7 @@
   exit_label_.Unuse();
   check_preempt_label_.Unuse();
   stack_overflow_label_.Unuse();
+  internal_failure_label_.Unuse();
 }
 
 
@@ -140,47 +156,75 @@
 
 
 void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
-  UNIMPLEMENTED_MIPS();
+  if (by != 0) {
+    __ Addu(current_input_offset(),
+            current_input_offset(), Operand(by * char_size()));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(reg >= 0);
+  ASSERT(reg < num_registers_);
+  if (by != 0) {
+    __ lw(a0, register_location(reg));
+    __ Addu(a0, a0, Operand(by));
+    __ sw(a0, register_location(reg));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::Backtrack() {
-  UNIMPLEMENTED_MIPS();
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
+  Pop(a0);
+  __ Addu(a0, a0, code_pointer());
+  __ Jump(Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::Bind(Label* label) {
-  UNIMPLEMENTED_MIPS();
+  __ bind(label);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
-  UNIMPLEMENTED_MIPS();
+  Label not_at_start;
+  // Did we start the match at the start of the string at all?
+  __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+  BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
+
+  // If we did, are we still at the start of the input?
+  __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+  __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+  BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+  __ bind(&not_at_start);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
-  UNIMPLEMENTED_MIPS();
+  // Did we start the match at the start of the string at all?
+  __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+  BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
+  // If we did, are we still at the start of the input?
+  __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+  __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+  BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
 }
 
 
@@ -188,26 +232,212 @@
                                               int cp_offset,
                                               Label* on_failure,
                                               bool check_end_of_string) {
-  UNIMPLEMENTED_MIPS();
+  if (on_failure == NULL) {
+    // Instead of inlining a backtrack for each test, (re)use the global
+    // backtrack target.
+    on_failure = &backtrack_label_;
+  }
+
+  if (check_end_of_string) {
+    // Is the last character of the required match inside the string?
+    CheckPosition(cp_offset + str.length() - 1, on_failure);
+  }
+
+  __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+  if (cp_offset != 0) {
+    int byte_offset = cp_offset * char_size();
+    __ Addu(a0, a0, Operand(byte_offset));
+  }
+
+  // a0 : Address of characters to match against str.
+  int stored_high_byte = 0;
+  for (int i = 0; i < str.length(); i++) {
+    if (mode_ == ASCII) {
+      __ lbu(a1, MemOperand(a0, 0));
+      __ addiu(a0, a0, char_size());
+      ASSERT(str[i] <= String::kMaxAsciiCharCode);
+      BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+    } else {
+      __ lhu(a1, MemOperand(a0, 0));
+      __ addiu(a0, a0, char_size());
+      uc16 match_char = str[i];
+      int match_high_byte = (match_char >> 8);
+      if (match_high_byte == 0) {
+        BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+      } else {
+        if (match_high_byte != stored_high_byte) {
+          __ li(a2, Operand(match_high_byte));
+          stored_high_byte = match_high_byte;
+        }
+        __ Addu(a3, a2, Operand(match_char & 0xff));
+        BranchOrBacktrack(on_failure, ne, a1, Operand(a3));
+      }
+    }
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
-  UNIMPLEMENTED_MIPS();
+  Label backtrack_non_equal;
+  __ lw(a0, MemOperand(backtrack_stackpointer(), 0));
+  __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+  __ Addu(backtrack_stackpointer(),
+          backtrack_stackpointer(),
+          Operand(kPointerSize));
+  __ bind(&backtrack_non_equal);
+  BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
     int start_reg,
     Label* on_no_match) {
-  UNIMPLEMENTED_MIPS();
+  Label fallthrough;
+  __ lw(a0, register_location(start_reg));  // Index of start of capture.
+  __ lw(a1, register_location(start_reg + 1));  // Index of end of capture.
+  __ Subu(a1, a1, a0);  // Length of capture.
+
+  // If length is zero, either the capture is empty or it is not participating.
+  // In either case succeed immediately.
+  __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+  __ Addu(t5, a1, current_input_offset());
+  // Check that there are enough characters left in the input.
+  BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+  if (mode_ == ASCII) {
+    Label success;
+    Label fail;
+    Label loop_check;
+
+    // a0 - offset of start of capture.
+    // a1 - length of capture.
+    __ Addu(a0, a0, Operand(end_of_input_address()));
+    __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+    __ Addu(a1, a0, Operand(a1));
+
+    // a0 - Address of start of capture.
+    // a1 - Address of end of capture.
+    // a2 - Address of current input position.
+
+    Label loop;
+    __ bind(&loop);
+    __ lbu(a3, MemOperand(a0, 0));
+    __ addiu(a0, a0, char_size());
+    __ lbu(t0, MemOperand(a2, 0));
+    __ addiu(a2, a2, char_size());
+
+    __ Branch(&loop_check, eq, t0, Operand(a3));
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ Or(a3, a3, Operand(0x20));  // Convert capture character to lower-case.
+    __ Or(t0, t0, Operand(0x20));  // Also convert input character.
+    __ Branch(&fail, ne, t0, Operand(a3));
+    __ Subu(a3, a3, Operand('a'));
+    __ Branch(&fail, hi, a3, Operand('z' - 'a'));  // Is a3 a lowercase letter?
+
+    __ bind(&loop_check);
+    __ Branch(&loop, lt, a0, Operand(a1));
+    __ jmp(&success);
+
+    __ bind(&fail);
+    GoTo(on_no_match);
+
+    __ bind(&success);
+    // Compute new value of character position after the matched part.
+    __ Subu(current_input_offset(), a2, end_of_input_address());
+  } else {
+    ASSERT(mode_ == UC16);
+    // Put regexp engine registers on stack.
+    RegList regexp_registers_to_retain = current_input_offset().bit() |
+        current_character().bit() | backtrack_stackpointer().bit();
+    __ MultiPush(regexp_registers_to_retain);
+
+    int argument_count = 4;
+    __ PrepareCallCFunction(argument_count, a2);
+
+    // a0 - offset of start of capture.
+    // a1 - length of capture.
+
+    // Put arguments into arguments registers.
+    // Parameters are
+    //   a0: Address byte_offset1 - Address captured substring's start.
+    //   a1: Address byte_offset2 - Address of current character position.
+    //   a2: size_t byte_length - length of capture in bytes(!).
+    //   a3: Isolate* isolate.
+
+    // Address of start of capture.
+    __ Addu(a0, a0, Operand(end_of_input_address()));
+    // Length of capture.
+    __ mov(a2, a1);
+    // Save length in a callee-saved register for use on return.
+    __ mov(s3, a1);
+    // Address of current input position.
+    __ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
+    // Isolate.
+    __ li(a3, Operand(ExternalReference::isolate_address()));
+
+    ExternalReference function =
+        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+    __ CallCFunction(function, argument_count);
+
+    // Restore regexp engine registers.
+    __ MultiPop(regexp_registers_to_retain);
+    __ li(code_pointer(), Operand(masm_->CodeObject()));
+    __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+    // Check if function returned non-zero for success or zero for failure.
+    BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
+    // On success, increment position by length of capture.
+    __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
+  }
+
+  __ bind(&fallthrough);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotBackReference(
     int start_reg,
     Label* on_no_match) {
-  UNIMPLEMENTED_MIPS();
+  Label fallthrough;
+  Label success;
+
+  // Find length of back-referenced capture.
+  __ lw(a0, register_location(start_reg));
+  __ lw(a1, register_location(start_reg + 1));
+  __ Subu(a1, a1, a0);  // Length to check.
+  // Succeed on empty capture (including no capture).
+  __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+  __ Addu(t5, a1, current_input_offset());
+  // Check that there are enough characters left in the input.
+  BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+  // Compute pointers to match string and capture string.
+  __ Addu(a0, a0, Operand(end_of_input_address()));
+  __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+  __ Addu(a1, a1, Operand(a0));
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == ASCII) {
+    __ lbu(a3, MemOperand(a0, 0));
+    __ addiu(a0, a0, char_size());
+    __ lbu(t0, MemOperand(a2, 0));
+    __ addiu(a2, a2, char_size());
+  } else {
+    ASSERT(mode_ == UC16);
+    __ lhu(a3, MemOperand(a0, 0));
+    __ addiu(a0, a0, char_size());
+    __ lhu(t0, MemOperand(a2, 0));
+    __ addiu(a2, a2, char_size());
+  }
+  BranchOrBacktrack(on_no_match, ne, a3, Operand(t0));
+  __ Branch(&loop, lt, a0, Operand(a1));
+
+  // Move current character position to position after match.
+  __ Subu(current_input_offset(), a2, end_of_input_address());
+  __ bind(&fallthrough);
 }
 
 
@@ -220,21 +450,23 @@
 
 void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
                                                 Label* on_not_equal) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
                                                      uint32_t mask,
                                                      Label* on_equal) {
-  UNIMPLEMENTED_MIPS();
+  __ And(a0, current_character(), Operand(mask));
+  BranchOrBacktrack(on_equal, eq, a0, Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
                                                         uint32_t mask,
                                                         Label* on_not_equal) {
-  UNIMPLEMENTED_MIPS();
+  __ And(a0, current_character(), Operand(mask));
+  BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
 }
 
 
@@ -249,24 +481,360 @@
 
 bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
                                                          Label* on_no_match) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check.
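+  // E.g. for 'd' below, (c - '0') <= ('9' - '0') as an unsigned compare is
+  // true exactly for c in '0'..'9', since c < '0' wraps to a large value.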
+  switch (type) {
+  case 's':
+    // Match space-characters.
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      Label success;
+      __ Branch(&success, eq, current_character(), Operand(' '));
+      // Check range 0x09..0x0d.
+      __ Subu(a0, current_character(), Operand('\t'));
+      BranchOrBacktrack(on_no_match, hi, a0, Operand('\r' - '\t'));
+      __ bind(&success);
+      return true;
+    }
+    return false;
+  case 'S':
+    // Match non-space characters.
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      BranchOrBacktrack(on_no_match, eq, current_character(), Operand(' '));
+      __ Subu(a0, current_character(), Operand('\t'));
+      BranchOrBacktrack(on_no_match, ls, a0, Operand('\r' - '\t'));
+      return true;
+    }
+    return false;
+  case 'd':
+    // Match ASCII digits ('0'..'9').
+    __ Subu(a0, current_character(), Operand('0'));
+    BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+    return true;
+  case 'D':
+    // Match anything but ASCII digits ('0'..'9').
+    __ Subu(a0, current_character(), Operand('0'));
+    BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+    return true;
+  case '.': {
+    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+    __ Xor(a0, current_character(), Operand(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
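+    // (XOR with 0x01 maps 0x0a and 0x0d to the adjacent values 0x0b and
+    // 0x0c, so a single unsigned range check covers both.)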
+    __ Subu(a0, a0, Operand(0x0b));
+    BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+    if (mode_ == UC16) {
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+      BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+    }
+    return true;
+  }
+  case 'n': {
+    // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+    __ Xor(a0, current_character(), Operand(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+    __ Subu(a0, a0, Operand(0x0b));
+    if (mode_ == ASCII) {
+      BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+    } else {
+      Label done;
+      BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+      BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+      __ bind(&done);
+    }
+    return true;
+  }
+  case 'w': {
+    if (mode_ != ASCII) {
+      // Table is 128 entries, so all ASCII characters can be tested.
+      BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ li(a0, Operand(map));
+    __ Addu(a0, a0, current_character());
+    __ lbu(a0, MemOperand(a0, 0));
+    BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+    return true;
+  }
+  case 'W': {
+    Label done;
+    if (mode_ != ASCII) {
+      // Table is 128 entries, so all ASCII characters can be tested.
+      __ Branch(&done, hi, current_character(), Operand('z'));
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ li(a0, Operand(map));
+    __ Addu(a0, a0, current_character());
+    __ lbu(a0, MemOperand(a0, 0));
+    BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+    if (mode_ != ASCII) {
+      __ bind(&done);
+    }
+    return true;
+  }
+  case '*':
+    // Match any character.
+    return true;
+  // No custom implementation (yet): s(UC16), S(UC16).
+  default:
+    return false;
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::Fail() {
-  UNIMPLEMENTED_MIPS();
+  __ li(v0, Operand(FAILURE));
+  __ jmp(&exit_label_);
 }
 
 
 Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
-  UNIMPLEMENTED_MIPS();
-  return Handle<HeapObject>::null();
+  if (masm_->has_exception()) {
+    // If the code gets corrupted due to long regular expressions and a lack
+    // of space on trampolines, an internal exception flag is set. If that
+    // happens, we jump straight to the exit sequence.
+    __ bind_to(&entry_label_, internal_failure_label_.pos());
+  } else {
+    // Finalize code - write the entry point code now that we know how many
+    // registers we need.
+
+    // Entry code:
+    __ bind(&entry_label_);
+    // Push arguments.
+    // Save callee-save registers.
+    // Start new stack frame.
+    // Store link register in existing stack-cell.
+    // Order here should correspond to the order of offset constants in the
+    // header file.
+    RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
+        s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
+    RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+    __ MultiPush(argument_registers | registers_to_retain | ra.bit());
+    // Set frame pointer in space for it if this is not a direct call
+    // from generated code.
+    __ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
+    __ push(a0);  // Make room for "position - 1" constant (value irrelevant).
+    __ push(a0);  // Make room for "at start" constant (value irrelevant).
+
+    // Check if we have space on the stack for registers.
+    Label stack_limit_hit;
+    Label stack_ok;
+
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(masm_->isolate());
+    __ li(a0, Operand(stack_limit));
+    __ lw(a0, MemOperand(a0));
+    __ Subu(a0, sp, a0);
+    // Handle it if the stack pointer is already below the stack limit.
+    __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+    // Check if there is room for the variable number of registers above
+    // the stack limit.
+    __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+    // Exit with OutOfMemory exception. There is not enough space on the stack
+    // for our working registers.
+    __ li(v0, Operand(EXCEPTION));
+    __ jmp(&exit_label_);
+
+    __ bind(&stack_limit_hit);
+    CallCheckStackGuardState(a0);
+    // If returned value is non-zero, we exit with the returned value as result.
+    __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+    __ bind(&stack_ok);
+    // Allocate space on stack for registers.
+    __ Subu(sp, sp, Operand(num_registers_ * kPointerSize));
+    // Load string end.
+    __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+    // Load input start.
+    __ lw(a0, MemOperand(frame_pointer(), kInputStart));
+    // Find negative length (offset of start relative to end).
+    __ Subu(current_input_offset(), a0, end_of_input_address());
+    // Set a0 to address of char before start of the input string
+    // (effectively string position -1).
+    __ lw(a1, MemOperand(frame_pointer(), kStartIndex));
+    __ Subu(a0, current_input_offset(), Operand(char_size()));
+    __ sll(t5, a1, (mode_ == UC16) ? 1 : 0);
+    __ Subu(a0, a0, t5);
+    // Store this value in a local variable, for use when clearing
+    // position registers.
+    __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+    // Determine whether the start index is zero, that is, whether matching
+    // starts at the beginning of the string, and store that in a local
+    // variable.
+    __ mov(t5, a1);
+    __ li(a1, Operand(1));
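+    // movn: clear a1 if the start index in t5 is non-zero, leaving a1 = 1
+    // only when matching starts at the beginning of the string.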
+    __ movn(a1, zero_reg, t5);
+    __ sw(a1, MemOperand(frame_pointer(), kAtStart));
+
+    if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
+      // Fill saved registers with initial value = start offset - 1.
+
+      // Address of register 0.
+      __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
+      __ li(a2, Operand(num_saved_registers_));
+      Label init_loop;
+      __ bind(&init_loop);
+      __ sw(a0, MemOperand(a1));
+      __ Addu(a1, a1, Operand(-kPointerSize));
+      __ Subu(a2, a2, Operand(1));
+      __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+    }
+
+    // Initialize backtrack stack pointer.
+    __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+    // Initialize code pointer register.
+    __ li(code_pointer(), Operand(masm_->CodeObject()));
+    // Load previous char as initial value of current character register.
+    Label at_start;
+    __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+    __ Branch(&at_start, ne, a0, Operand(zero_reg));
+    LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
+    __ jmp(&start_label_);
+    __ bind(&at_start);
+    __ li(current_character(), Operand('\n'));
+    __ jmp(&start_label_);
+
+
+    // Exit code:
+    if (success_label_.is_linked()) {
+      // Save captures when successful.
+      __ bind(&success_label_);
+      if (num_saved_registers_ > 0) {
+        // Copy captures to output.
+        __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+        __ lw(a0, MemOperand(frame_pointer(), kRegisterOutput));
+        __ lw(a2, MemOperand(frame_pointer(), kStartIndex));
+        __ Subu(a1, end_of_input_address(), a1);
+        // a1 is length of input in bytes.
+        if (mode_ == UC16) {
+          __ srl(a1, a1, 1);
+        }
+        // a1 is length of input in characters.
+        __ Addu(a1, a1, Operand(a2));
+        // a1 is length of string in characters.
+
+        ASSERT_EQ(0, num_saved_registers_ % 2);
+        // Always an even number of capture registers. This allows us to
+        // unroll the loop once to add an operation between a load of a register
+        // and the following use of that register.
+        for (int i = 0; i < num_saved_registers_; i += 2) {
+          __ lw(a2, register_location(i));
+          __ lw(a3, register_location(i + 1));
+          if (mode_ == UC16) {
+            __ sra(a2, a2, 1);
+            __ Addu(a2, a2, a1);
+            __ sra(a3, a3, 1);
+            __ Addu(a3, a3, a1);
+          } else {
+            __ Addu(a2, a1, Operand(a2));
+            __ Addu(a3, a1, Operand(a3));
+          }
+          __ sw(a2, MemOperand(a0));
+          __ Addu(a0, a0, kPointerSize);
+          __ sw(a3, MemOperand(a0));
+          __ Addu(a0, a0, kPointerSize);
+        }
+      }
+      __ li(v0, Operand(SUCCESS));
+    }
+    // Exit and return v0.
+    __ bind(&exit_label_);
+    // Skip sp past regexp registers and local variables.
+    __ mov(sp, frame_pointer());
+    // Restore registers s0..s7 and return (restoring ra to pc).
+    __ MultiPop(registers_to_retain | ra.bit());
+    __ Ret();
+
+    // Backtrack code (branch target for conditional backtracks).
+    if (backtrack_label_.is_linked()) {
+      __ bind(&backtrack_label_);
+      Backtrack();
+    }
+
+    Label exit_with_exception;
+
+    // Preempt-code.
+    if (check_preempt_label_.is_linked()) {
+      SafeCallTarget(&check_preempt_label_);
+      // Put regexp engine registers on stack.
+      RegList regexp_registers_to_retain = current_input_offset().bit() |
+          current_character().bit() | backtrack_stackpointer().bit();
+      __ MultiPush(regexp_registers_to_retain);
+      CallCheckStackGuardState(a0);
+      __ MultiPop(regexp_registers_to_retain);
+      // If returning non-zero, we should end execution with the given
+      // result as return value.
+      __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+      // String might have moved: Reload end of string from frame.
+      __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+      __ li(code_pointer(), Operand(masm_->CodeObject()));
+      SafeReturn();
+    }
+
+    // Backtrack stack overflow code.
+    if (stack_overflow_label_.is_linked()) {
+      SafeCallTarget(&stack_overflow_label_);
+      // Reached if the backtrack-stack limit has been hit.
+      // Put regexp engine registers on stack first.
+      RegList regexp_registers = current_input_offset().bit() |
+          current_character().bit();
+      __ MultiPush(regexp_registers);
+      Label grow_failed;
+      // Call GrowStack(backtrack_stackpointer(), &stack_base)
+      static const int num_arguments = 3;
+      __ PrepareCallCFunction(num_arguments, a0);
+      __ mov(a0, backtrack_stackpointer());
+      __ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
+      __ li(a2, Operand(ExternalReference::isolate_address()));
+      ExternalReference grow_stack =
+          ExternalReference::re_grow_stack(masm_->isolate());
+      __ CallCFunction(grow_stack, num_arguments);
+      // Restore regexp registers.
+      __ MultiPop(regexp_registers);
+      // If NULL is returned, we have failed to grow the stack and must
+      // exit with a stack-overflow exception.
+      __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
+      // Otherwise use return value as new stack pointer.
+      __ mov(backtrack_stackpointer(), v0);
+      // Restore saved registers and continue.
+      __ li(code_pointer(), Operand(masm_->CodeObject()));
+      __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+      SafeReturn();
+    }
+
+    if (exit_with_exception.is_linked()) {
+      // If any of the code above needed to exit with an exception.
+      __ bind(&exit_with_exception);
+      // Exit with Result EXCEPTION(-1) to signal thrown exception.
+      __ li(v0, Operand(EXCEPTION));
+      __ jmp(&exit_label_);
+    }
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = FACTORY->NewCode(code_desc,
+                                       Code::ComputeFlags(Code::REGEXP),
+                                       masm_->CodeObject());
+  LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+  return Handle<HeapObject>::cast(code);
 }
 
 
 void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
-  UNIMPLEMENTED_MIPS();
+  if (to == NULL) {
+    Backtrack();
+    return;
+  }
+  __ jmp(to);
+  return;
 }
 
 
@@ -281,13 +849,15 @@
 void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
                                            int comparand,
                                            Label* if_lt) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a0, register_location(reg));
+  BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
 }
 
 
 void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
                                               Label* if_eq) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a0, register_location(reg));
+  BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
 }
 
 
@@ -301,23 +871,47 @@
                                                    Label* on_end_of_input,
                                                    bool check_bounds,
                                                    int characters) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
+  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works).
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
 
 
 void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
-  UNIMPLEMENTED_MIPS();
+  Pop(current_input_offset());
 }
 
 
 void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
-  UNIMPLEMENTED_MIPS();
+  Pop(a0);
+  __ sw(a0, register_location(register_index));
 }
 
 
-
 void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
-  UNIMPLEMENTED_MIPS();
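+  // Push the label's code offset on the backtrack stack. For a label that
+  // is not bound yet, the offset word is embedded in the instruction stream
+  // (branched over) and patched later through label_at_put.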
+  if (label->is_bound()) {
+    int target = label->pos();
+    __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    Label after_constant;
+    __ Branch(&after_constant);
+    int offset = masm_->pc_offset();
+    int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+    __ emit(0);
+    masm_->label_at_put(label, offset);
+    __ bind(&after_constant);
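+    // lw takes a signed 16-bit immediate offset, so larger offsets need the
+    // address computed in a register first.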
+    if (is_int16(cp_offset)) {
+      __ lw(a0, MemOperand(code_pointer(), cp_offset));
+    } else {
+      __ Addu(a0, code_pointer(), cp_offset);
+      __ lw(a0, MemOperand(a0, 0));
+    }
+  }
+  Push(a0);
+  CheckStackLimit();
 }
 
 
@@ -328,55 +922,90 @@
 
 void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
                                            StackCheckFlag check_stack_limit) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a0, register_location(register_index));
+  Push(a0);
+  if (check_stack_limit) CheckStackLimit();
 }
 
 
 void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(current_input_offset(), register_location(reg));
 }
 
 
 void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
-  UNIMPLEMENTED_MIPS();
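+  // Registers hold backtrack-stack pointers as offsets from the high end of
+  // the stack (see WriteStackPointerToRegister), so rebase the value here.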
+  __ lw(backtrack_stackpointer(), register_location(reg));
+  __ lw(a0, MemOperand(frame_pointer(), kStackHighEnd));
+  __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
-  UNIMPLEMENTED_MIPS();
+  Label after_position;
+  __ Branch(&after_position,
+            ge,
+            current_input_offset(),
+            Operand(-by * char_size()));
+  __ li(current_input_offset(), -by * char_size());
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
 }
 
 
 void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ li(a0, Operand(to));
+  __ sw(a0, register_location(register_index));
 }
 
 
 void RegExpMacroAssemblerMIPS::Succeed() {
-  UNIMPLEMENTED_MIPS();
+  __ jmp(&success_label_);
 }
 
 
 void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
                                                              int cp_offset) {
-  UNIMPLEMENTED_MIPS();
+  if (cp_offset == 0) {
+    __ sw(current_input_offset(), register_location(reg));
+  } else {
+    __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+    __ sw(a0, register_location(reg));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(reg_from <= reg_to);
+  __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ sw(a0, register_location(reg));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a1, MemOperand(frame_pointer(), kStackHighEnd));
+  __ Subu(a0, backtrack_stackpointer(), a1);
+  __ sw(a0, register_location(reg));
 }
 
 
 // Private methods:
 
 void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
-  UNIMPLEMENTED_MIPS();
+  static const int num_arguments = 3;
+  __ PrepareCallCFunction(num_arguments, scratch);
+  __ mov(a2, frame_pointer());
+  // Code* of self.
+  __ li(a1, Operand(masm_->CodeObject()));
+  // a0 becomes return address pointer.
+  ExternalReference stack_guard_check =
+      ExternalReference::re_check_stack_guard_state(masm_->isolate());
+  CallCFunctionUsingStub(stack_guard_check, num_arguments);
 }
 
 
@@ -388,22 +1017,101 @@
 
 
 int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
-                                                  Code* re_code,
-                                                  Address re_frame) {
-  UNIMPLEMENTED_MIPS();
+                                                   Code* re_code,
+                                                   Address re_frame) {
+  Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+  ASSERT(isolate == Isolate::Current());
+  if (isolate->stack_guard()->IsStackOverflow()) {
+    isolate->StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If it is not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+  // If this is a direct call from JavaScript retry the RegExp forcing the call
+  // through the runtime system. Currently the direct call cannot handle a GC.
+  if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+    return RETRY;
+  }
+
+  // Prepare for possible GC.
+  HandleScope handles;
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+  // Current string.
+  bool is_ascii = subject->IsAsciiRepresentation();
+
+  ASSERT(re_code->instruction_start() <= *return_address);
+  ASSERT(*return_address <=
+      re_code->instruction_start() + re_code->instruction_size());
+
+  MaybeObject* result = Execution::HandleStackGuardInterrupt();
+
+  if (*code_handle != re_code) {  // Return address no longer valid.
+    int delta = *code_handle - re_code;
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  // String might have changed.
+  if (subject->IsAsciiRepresentation() != is_ascii) {
+    // If we changed between an ASCII and an UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  ASSERT(StringShape(*subject).IsSequential() ||
+      StringShape(*subject).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+  // Find the current start address of the same character at the current string
+  // position.
+  int start_index = frame_entry<int>(re_frame, kStartIndex);
+  const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte*>(re_frame, kInputEnd);
+    int byte_length = end_address - start_address;
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  }
+
   return 0;
 }
 
 
 MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+  ASSERT(register_index < (1<<30));
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return MemOperand(frame_pointer(),
+                    kRegisterZero - register_index * kPointerSize);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
                                             Label* on_outside_input) {
-  UNIMPLEMENTED_MIPS();
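+  // current_input_offset() is negative while inside the string (it counts
+  // back from the end), so position cp_offset is out of bounds once the
+  // offset is >= -cp_offset * char_size().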
+  BranchOrBacktrack(on_outside_input,
+                    ge,
+                    current_input_offset(),
+                    Operand(-cp_offset * char_size()));
 }
 
 
@@ -411,61 +1119,126 @@
                                                  Condition condition,
                                                  Register rs,
                                                  const Operand& rt) {
-  UNIMPLEMENTED_MIPS();
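+  // A NULL label means backtrack; condition al means branch unconditionally.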
+  if (condition == al) {  // Unconditional.
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ jmp(to);
+    return;
+  }
+  if (to == NULL) {
+    __ Branch(&backtrack_label_, condition, rs, rt);
+    return;
+  }
+  __ Branch(to, condition, rs, rt);
 }
 
 
 void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
                                            const Operand& rt) {
-  UNIMPLEMENTED_MIPS();
+  __ BranchAndLink(to, cond, rs, rt);
 }
 
 
 void RegExpMacroAssemblerMIPS::SafeReturn() {
-  UNIMPLEMENTED_MIPS();
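+  // The return address was pushed as an offset from the code object (see
+  // SafeCallTarget), which keeps it valid if the code moves during GC; add
+  // the code object base back before jumping.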
+  __ pop(ra);
+  __ Addu(t5, ra, Operand(masm_->CodeObject()));
+  __ Jump(t5);
 }
 
 
 void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
-  UNIMPLEMENTED_MIPS();
+  __ bind(name);
+  __ Subu(ra, ra, Operand(masm_->CodeObject()));
+  __ push(ra);
 }
 
 
 void RegExpMacroAssemblerMIPS::Push(Register source) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!source.is(backtrack_stackpointer()));
+  __ Addu(backtrack_stackpointer(),
+          backtrack_stackpointer(),
+          Operand(-kPointerSize));
+  __ sw(source, MemOperand(backtrack_stackpointer()));
 }
 
 
 void RegExpMacroAssemblerMIPS::Pop(Register target) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!target.is(backtrack_stackpointer()));
+  __ lw(target, MemOperand(backtrack_stackpointer()));
+  __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckPreemption() {
-  UNIMPLEMENTED_MIPS();
+  // Check for preemption.
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(masm_->isolate());
+  __ li(a0, Operand(stack_limit));
+  __ lw(a0, MemOperand(a0));
+  SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckStackLimit() {
-  UNIMPLEMENTED_MIPS();
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+
+  __ li(a0, Operand(stack_limit));
+  __ lw(a0, MemOperand(a0));
+  SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
     ExternalReference function,
     int num_arguments) {
-  UNIMPLEMENTED_MIPS();
+  // Must pass all arguments in registers. The stub pushes on the stack.
+  ASSERT(num_arguments <= 4);
+  __ li(code_pointer(), Operand(function));
+  RegExpCEntryStub stub;
+  __ CallStub(&stub);
+  if (OS::ActivationFrameAlignment() != 0) {
+    __ lw(sp, MemOperand(sp, 16));
+  }
+  __ li(code_pointer(), Operand(masm_->CodeObject()));
 }
 
 
 void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
-                                                             int characters) {
-  UNIMPLEMENTED_MIPS();
+                                                            int characters) {
+  Register offset = current_input_offset();
+  if (cp_offset != 0) {
+    __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+    offset = a0;
+  }
+  // We assume that we cannot do unaligned loads on MIPS, so this function
+  // must only be used to load a single character at a time.
+  ASSERT(characters == 1);
+  __ Addu(t5, end_of_input_address(), Operand(offset));
+  if (mode_ == ASCII) {
+    __ lbu(current_character(), MemOperand(t5, 0));
+  } else {
+    ASSERT(mode_ == UC16);
+    __ lhu(current_character(), MemOperand(t5, 0));
+  }
 }
 
 
 void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
-  UNIMPLEMENTED_MIPS();
+  int stack_alignment = OS::ActivationFrameAlignment();
+  if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
+  // Stack is already aligned for call, so decrement by alignment
+  // to make room for storing the return address.
+  __ Subu(sp, sp, Operand(stack_alignment));
+  __ sw(ra, MemOperand(sp, 0));
+  __ mov(a0, sp);
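+  // CallCFunctionUsingStub left the C function address in t1 (the code
+  // pointer register); the MIPS calling convention expects the callee
+  // address in t9.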
+  __ mov(t9, t1);
+  __ Call(t9);
+  __ lw(ra, MemOperand(sp, 0));
+  __ Addu(sp, sp, Operand(stack_alignment));
+  __ Jump(Operand(ra));
 }
 
 
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index 7310c9d..ad7ada5 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -121,10 +121,11 @@
   static const int kStoredRegisters = kFramePointer;
   // Return address (stored from link register, read into pc on return).
   static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+  static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
   // Stack frame header.
   static const int kStackFrameHeader = kReturnAddress + kPointerSize;
   // Stack parameters placed by caller.
-  static const int kRegisterOutput = kStackFrameHeader + 16;
+  static const int kRegisterOutput = kStackFrameHeader + 20;
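+  // (The change from "+ 16" matches the kSecondaryReturnAddress slot added
+  // above.)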
   static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
   static const int kIsolate = kDirectCall + kPointerSize;
@@ -183,7 +184,7 @@
   // Register holding pointer to the current code object.
   inline Register code_pointer() { return t1; }
 
-  // Byte size of chars in the string to match (decided by the Mode argument)
+  // Byte size of chars in the string to match (decided by the Mode argument).
   inline int char_size() { return static_cast<int>(mode_); }
 
   // Equivalent to a conditional branch to the label, unless the label
@@ -228,7 +229,7 @@
   int num_registers_;
 
   // Number of registers to output at the end (the saved registers
-  // are always 0..num_saved_registers_-1)
+  // are always 0..num_saved_registers_-1).
   int num_saved_registers_;
 
   // Labels used internally.
@@ -239,6 +240,7 @@
   Label exit_label_;
   Label check_preempt_label_;
   Label stack_overflow_label_;
+  Label internal_failure_label_;
 };
 
 #endif  // V8_INTERPRETED_REGEXP
diff --git a/src/mips/register-allocator-mips-inl.h b/src/mips/register-allocator-mips-inl.h
deleted file mode 100644
index bbfb31d..0000000
--- a/src/mips/register-allocator-mips-inl.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
-#include "v8.h"
-#include "mips/assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  // The code for this test relies on the order of register codes.
-  return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
-}
-
-
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,    // zero_reg
-    1,    // at
-    2,    // v0
-    3,    // v1
-    4,    // a0
-    5,    // a1
-    6,    // a2
-    7,    // a3
-    8,    // t0
-    9,    // t1
-    10,   // t2
-    11,   // t3
-    12,   // t4
-    13,   // t5
-    14,   // t6
-    15,   // t7
-    16,   // t8
-    17,   // t9
-    18,   // s0
-    19,   // s1
-    20,   // s2
-    21,   // s3
-    22,   // s4
-    23,   // s5
-    24,   // s6
-    25,   // s7
-    26,   // k0
-    27,   // k1
-    28,   // gp
-    29,   // sp
-    30,   // s8_fp
-    31,   // ra
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] = {
-    zero_reg,
-    at,
-    v0,
-    v1,
-    a0,
-    a1,
-    a2,
-    a3,
-    t0,
-    t1,
-    t2,
-    t3,
-    t4,
-    t5,
-    t6,
-    t7,
-    s0,
-    s1,
-    s2,
-    s3,
-    s4,
-    s5,
-    s6,
-    s7,
-    t8,
-    t9,
-    k0,
-    k1,
-    gp,
-    sp,
-    s8_fp,
-    ra
-  };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
diff --git a/src/mips/register-allocator-mips.cc b/src/mips/register-allocator-mips.cc
deleted file mode 100644
index 2c5d61b..0000000
--- a/src/mips/register-allocator-mips.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void Result::ToRegister(Register target) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  // No byte registers on MIPS.
-  UNREACHABLE();
-  return Result();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/register-allocator-mips.h b/src/mips/register-allocator-mips.h
deleted file mode 100644
index c448923..0000000
--- a/src/mips/register-allocator-mips.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
-#include "mips/constants-mips.h"
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
-  // No registers are currently managed by the register allocator on MIPS.
-  static const int kNumRegisters = 0;
-  static const int kInvalidRegister = -1;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 50ad7a1..68fb7ce 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,7 +35,7 @@
 
 #include "disasm.h"
 #include "assembler.h"
-#include "globals.h"    // Need the BitCast
+#include "globals.h"    // Need the BitCast.
 #include "mips/constants-mips.h"
 #include "mips/simulator-mips.h"
 
@@ -46,7 +46,7 @@
 namespace v8 {
 namespace internal {
 
-// Utils functions
+// Utils functions.
 bool HaveSameSign(int32_t a, int32_t b) {
   return ((a ^ b) >= 0);
 }
@@ -139,7 +139,7 @@
 }
 
 
-#else  // ndef GENERATED_CODE_COVERAGE
+#else  // GENERATED_CODE_COVERAGE
 
 #define UNSUPPORTED() printf("Unsupported instruction.\n");
 
@@ -263,15 +263,15 @@
 #define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
 
   PrintF("\n");
-  // at, v0, a0
+  // at, v0, a0.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(1), REG_INFO(2), REG_INFO(4));
-  // v1, a1
+  // v1, a1.
   PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          "", REG_INFO(3), REG_INFO(5));
-  // a2
+  // a2.
   PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
-  // a3
+  // a3.
   PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
   PrintF("\n");
   // t0-t7, s0-s7
@@ -280,16 +280,16 @@
            REG_INFO(8+i), REG_INFO(16+i));
   }
   PrintF("\n");
-  // t8, k0, LO
+  // t8, k0, LO.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(24), REG_INFO(26), REG_INFO(32));
-  // t9, k1, HI
+  // t9, k1, HI.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(25), REG_INFO(27), REG_INFO(33));
-  // sp, fp, gp
+  // sp, fp, gp.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(29), REG_INFO(30), REG_INFO(28));
-  // pc
+  // pc.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(31), REG_INFO(34));
 
@@ -307,7 +307,7 @@
   PrintAllRegs();
 
   PrintF("\n\n");
-  // f0, f1, f2, ... f31
+  // f0, f1, f2, ... f31.
   PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
   PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
   PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
@@ -345,7 +345,7 @@
   char arg2[ARG_SIZE + 1];
   char* argv[3] = { cmd, arg1, arg2 };
 
-  // make sure to have a proper terminating character if reaching the limit
+  // Make sure to have a proper terminating character if reaching the limit.
   cmd[COMMAND_SIZE] = 0;
   arg1[ARG_SIZE] = 0;
   arg2[ARG_SIZE] = 0;
@@ -358,10 +358,10 @@
     if (last_pc != sim_->get_pc()) {
       disasm::NameConverter converter;
       disasm::Disassembler dasm(converter);
-      // use a reasonably large buffer
+      // Use a reasonably large buffer.
       v8::internal::EmbeddedVector<char, 256> buffer;
       dasm.InstructionDecode(buffer,
-                             reinterpret_cast<byte_*>(sim_->get_pc()));
+                             reinterpret_cast<byte*>(sim_->get_pc()));
       PrintF("  0x%08x  %s\n", sim_->get_pc(), buffer.start());
       last_pc = sim_->get_pc();
     }
@@ -475,7 +475,7 @@
 
         if (strcmp(cmd, "stack") == 0) {
           cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
-        } else {  // "mem"
+        } else {  // Command "mem".
           int32_t value;
           if (!GetValue(arg1, &value)) {
             PrintF("%s unrecognized\n", arg1);
@@ -496,35 +496,62 @@
         end = cur + words;
 
         while (cur < end) {
-          PrintF("  0x%08x:  0x%08x %10d\n",
+          PrintF("  0x%08x:  0x%08x %10d",
                  reinterpret_cast<intptr_t>(cur), *cur, *cur);
+          HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+          int value = *cur;
+          Heap* current_heap = v8::internal::Isolate::Current()->heap();
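+          // On 32-bit targets a smi is stored as the value shifted left one
+          // bit with a zero tag bit, so an even word prints as value / 2.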
+          if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+            PrintF(" (");
+            if ((value & 1) == 0) {
+              PrintF("smi %d", value / 2);
+            } else {
+              obj->ShortPrint();
+            }
+            PrintF(")");
+          }
+          PrintF("\n");
           cur++;
         }
 
-      } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
+      } else if ((strcmp(cmd, "disasm") == 0) ||
+                 (strcmp(cmd, "dpc") == 0) ||
+                 (strcmp(cmd, "di") == 0)) {
         disasm::NameConverter converter;
         disasm::Disassembler dasm(converter);
-        // use a reasonably large buffer
+        // Use a reasonably large buffer.
         v8::internal::EmbeddedVector<char, 256> buffer;
 
-        byte_* cur = NULL;
-        byte_* end = NULL;
+        byte* cur = NULL;
+        byte* end = NULL;
 
         if (argc == 1) {
-          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
           end = cur + (10 * Instruction::kInstrSize);
         } else if (argc == 2) {
-          int32_t value;
-          if (GetValue(arg1, &value)) {
-            cur = reinterpret_cast<byte_*>(value);
-            // no length parameter passed, assume 10 instructions
-            end = cur + (10 * Instruction::kInstrSize);
+          int regnum = Registers::Number(arg1);
+          if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+            // The argument is an address or a register name.
+            int32_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(value);
+              // Disassemble 10 instructions at <arg1>.
+              end = cur + (10 * Instruction::kInstrSize);
+            }
+          } else {
+            // The argument is the number of instructions.
+            int32_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(sim_->get_pc());
+              // Disassemble <arg1> instructions.
+              end = cur + (value * Instruction::kInstrSize);
+            }
           }
         } else {
           int32_t value1;
           int32_t value2;
           if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
-            cur = reinterpret_cast<byte_*>(value1);
+            cur = reinterpret_cast<byte*>(value1);
             end = cur + (value2 * Instruction::kInstrSize);
           }
         }
@@ -561,25 +588,25 @@
       } else if (strcmp(cmd, "unstop") == 0) {
           PrintF("Unstop command not implemented on MIPS.");
       } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
-        // Print registers and disassemble
+        // Print registers and disassemble.
         PrintAllRegs();
         PrintF("\n");
 
         disasm::NameConverter converter;
         disasm::Disassembler dasm(converter);
-        // use a reasonably large buffer
+        // Use a reasonably large buffer.
         v8::internal::EmbeddedVector<char, 256> buffer;
 
-        byte_* cur = NULL;
-        byte_* end = NULL;
+        byte* cur = NULL;
+        byte* end = NULL;
 
         if (argc == 1) {
-          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
           end = cur + (10 * Instruction::kInstrSize);
         } else if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
-            cur = reinterpret_cast<byte_*>(value);
+            cur = reinterpret_cast<byte*>(value);
             // no length parameter passed, assume 10 instructions
             end = cur + (10 * Instruction::kInstrSize);
           }
@@ -587,7 +614,7 @@
           int32_t value1;
           int32_t value2;
           if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
-            cur = reinterpret_cast<byte_*>(value1);
+            cur = reinterpret_cast<byte*>(value1);
             end = cur + (value2 * Instruction::kInstrSize);
           }
         }
@@ -615,8 +642,10 @@
         PrintF("flags\n");
         PrintF("  print flags\n");
         PrintF("disasm [<instructions>]\n");
-        PrintF("disasm [[<address>] <instructions>]\n");
-        PrintF("  disassemble code, default is 10 instructions from pc\n");
+        PrintF("disasm [<address/register>]\n");
+        PrintF("disasm [[<address/register>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions\n");
+        PrintF("  from pc (alias 'di')\n");
         PrintF("gdb\n");
         PrintF("  enter gdb\n");
         PrintF("break <address>\n");
@@ -689,8 +718,8 @@
 
 CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
   v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
-                                                         ICacheHash(page),
-                                                         true);
+                                                        ICacheHash(page),
+                                                        true);
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -738,23 +767,23 @@
 }
 
 
-void Simulator::Initialize() {
-  if (Isolate::Current()->simulator_initialized()) return;
-  Isolate::Current()->set_simulator_initialized(true);
-  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+void Simulator::Initialize(Isolate* isolate) {
+  if (isolate->simulator_initialized()) return;
+  isolate->set_simulator_initialized(true);
+  ::v8::internal::ExternalReference::set_redirector(isolate,
+                                                    &RedirectExternalReference);
 }
 
 
-Simulator::Simulator() : isolate_(Isolate::Current()) {
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
     i_cache_ = new v8::internal::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
-  Initialize();
+  Initialize(isolate);
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
-  stack_size_ = 1 * 1024*1024;  // allocate 1MB for stack
   stack_ = reinterpret_cast<char*>(malloc(stack_size_));
   pc_modified_ = false;
   icount_ = 0;
@@ -852,17 +881,14 @@
 // Get the active Simulator for the current thread.
 Simulator* Simulator::current(Isolate* isolate) {
   v8::internal::Isolate::PerIsolateThreadData* isolate_data =
-      Isolate::CurrentPerIsolateThreadData();
-  if (isolate_data == NULL) {
-    Isolate::EnterDefaultIsolate();
-    isolate_data = Isolate::CurrentPerIsolateThreadData();
-  }
+       isolate->FindOrAllocatePerThreadDataForThisThread();
   ASSERT(isolate_data != NULL);
 
   Simulator* sim = isolate_data->simulator();
   if (sim == NULL) {
     // TODO(146): delete the simulator object when a thread/isolate goes away.
-    sim = new Simulator();
+    sim = new Simulator(isolate);
     isolate_data->set_simulator(sim);
   }
   return sim;
@@ -877,7 +903,7 @@
     pc_modified_ = true;
   }
 
-  // zero register always hold 0.
+  // Zero register always holds 0.
   registers_[reg] = (reg == 0) ? 0 : value;
 }
 
@@ -937,6 +963,87 @@
 }
 
 
+// For use in calls that take two double values, constructed either
+// from a0-a3 or f12 and f14.
+void Simulator::GetFpArgs(double* x, double* y) {
+  if (!IsMipsSoftFloatABI) {
+    *x = get_fpu_register_double(12);
+    *y = get_fpu_register_double(14);
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+
+    // Registers a0 and a1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+
+    // Registers a2 and a3 -> y.
+    reg_buffer[0] = get_register(a2);
+    reg_buffer[1] = get_register(a3);
+    memcpy(y, buffer, sizeof(buffer));
+  }
+}
+
+
+// For use in calls that take one double value, constructed either
+// from a0 and a1 or f12.
+void Simulator::GetFpArgs(double* x) {
+  if (!IsMipsSoftFloatABI) {
+    *x = get_fpu_register_double(12);
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+    // Registers a0 and a1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+  }
+}
+
+
+// For use in calls that take one double value (constructed either from
+// a0 and a1 or from f12) and one integer value.
+void Simulator::GetFpArgs(double* x, int32_t* y) {
+  if (!IsMipsSoftFloatABI) {
+    *x = get_fpu_register_double(12);
+    *y = get_register(a2);
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+    // Registers a0 and a1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+
+    // Register a2 -> y.
+    reg_buffer[0] = get_register(a2);
+    memcpy(y, buffer, sizeof(*y));
+  }
+}
+
+
+// The return value is either in v0/v1 or f0.
+void Simulator::SetFpResult(const double& result) {
+  if (!IsMipsSoftFloatABI) {
+    set_fpu_register_double(0, result);
+  } else {
+    char buffer[2 * sizeof(registers_[0])];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+    memcpy(buffer, &result, sizeof(buffer));
+    // Copy result to v0 and v1.
+    set_register(v0, reg_buffer[0]);
+    set_register(v1, reg_buffer[1]);
+  }
+}
+
+
 // Helper functions for setting and testing the FCSR register's bits.
 void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
   if (value) {
@@ -995,7 +1102,7 @@
 
 int Simulator::ReadW(int32_t addr, Instruction* instr) {
   if (addr >=0 && addr < 0x400) {
-    // this has to be a NULL-dereference
+    // This has to be a NULL dereference; drop into the debugger.
     MipsDebugger dbg(this);
     dbg.Debug();
   }
@@ -1003,8 +1110,9 @@
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   MipsDebugger dbg(this);
   dbg.Debug();
   return 0;
@@ -1013,7 +1121,7 @@
 
 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
   if (addr >= 0 && addr < 0x400) {
-    // this has to be a NULL-dereference
+    // This has to be a NULL dereference; drop into the debugger.
     MipsDebugger dbg(this);
     dbg.Debug();
   }
@@ -1022,8 +1130,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   MipsDebugger dbg(this);
   dbg.Debug();
 }
@@ -1034,8 +1143,9 @@
     double* ptr = reinterpret_cast<double*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned (double) read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
   return 0;
 }
@@ -1047,8 +1157,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned (double) write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
 }
 
@@ -1058,8 +1169,9 @@
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
   return 0;
 }
@@ -1070,8 +1182,9 @@
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
   return 0;
 }
@@ -1083,8 +1196,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
 }
 
@@ -1095,8 +1209,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
 }
 
@@ -1158,6 +1273,14 @@
                                          int32_t arg2,
                                          int32_t arg3);
 
+// This signature supports direct calls into an API function's native callback
+// (refer to InvocationCallback in v8.h).
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+
+// This signature supports direct calls to an accessor getter callback.
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
+                                                                  int32_t arg1);
+
 // Software interrupt instructions are used by the simulator to call into the
 // C-based V8 runtime. They are also used for debugging with the simulator.
 void Simulator::SoftwareInterrupt(Instruction* instr) {
@@ -1169,11 +1292,6 @@
 
   // We first check if we met a call_rt_redirected.
   if (instr->InstructionBits() == rtCallRedirInstr) {
-    // Check if stack is aligned. Error if not aligned is reported below to
-    // include information on the function called.
-    bool stack_aligned =
-        (get_register(sp)
-         & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
     Redirection* redirection = Redirection::FromSwiInstruction(instr);
     int32_t arg0 = get_register(a0);
     int32_t arg1 = get_register(a1);
@@ -1188,58 +1306,122 @@
     // stack check here.
     int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
     int32_t* stack = reinterpret_cast<int32_t*>(stack_);
-    if (stack_pointer >= stack && stack_pointer < stack + stack_size_) {
-      arg4 = stack_pointer[0];
-      arg5 = stack_pointer[1];
+    if (stack_pointer >= stack && stack_pointer < stack + stack_size_ - 5) {
+      // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+      arg4 = stack_pointer[4];
+      arg5 = stack_pointer[5];
     }
+
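The offset fix above follows the O32 convention: although a0..a3 travel in
registers, the caller still reserves four word-sized home slots at the bottom
of the outgoing-argument area, so the fifth and sixth arguments sit at word
offsets 4 and 5 from sp. A minimal sketch with an invented frame:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Pretend outgoing-argument area: slots 0..3 reserved for a0..a3.
      int32_t frame[6] = {0, 0, 0, 0, 44, 55};
      int32_t* stack_pointer = frame;
      int32_t arg4 = stack_pointer[4];
      int32_t arg5 = stack_pointer[5];
      printf("arg4=%d arg5=%d\n", arg4, arg5);  // arg4=44 arg5=55
      return 0;
    }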
+    bool fp_call =
+         (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+    if (!IsMipsSoftFloatABI) {
+      // With the hard floating point calling convention, double
+      // arguments are passed in FPU registers. Fetch the arguments
+      // from there and call the builtin using soft floating point
+      // convention.
+      switch (redirection->type()) {
+      case ExternalReference::BUILTIN_FP_FP_CALL:
+      case ExternalReference::BUILTIN_COMPARE_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        arg2 = get_fpu_register(f14);
+        arg3 = get_fpu_register(f15);
+        break;
+      case ExternalReference::BUILTIN_FP_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        break;
+      case ExternalReference::BUILTIN_FP_INT_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        arg2 = get_register(a2);
+        break;
+      default:
+        break;
+      }
+    }
+
     // This is dodgy but it works because the C entry stubs are never moved.
     // See comment in codegen-arm.cc and bug 1242173.
     int32_t saved_ra = get_register(ra);
 
     intptr_t external =
-        reinterpret_cast<int32_t>(redirection->external_function());
+          reinterpret_cast<intptr_t>(redirection->external_function());
 
     // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
     // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
     // simulator. Soft-float has additional abstraction of ExternalReference,
-    // to support serialization. Finally, when simulated on x86 host, the
-    // x86 softfloat routines are used, and this Redirection infrastructure
-    // lets simulated-mips make calls into x86 C code.
-    // When doing that, the 'double' return type must be handled differently
-    // than the usual int64_t return. The data is returned in different
-    // registers and cannot be cast from one type to the other. However, the
-    // calling arguments are passed the same way in both cases.
-    if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
+    // to support serialization.
+    if (fp_call) {
       SimulatorRuntimeFPCall target =
-          reinterpret_cast<SimulatorRuntimeFPCall>(external);
-      if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
-        PrintF("Call to host function at %p with args %08x:%08x %08x:%08x",
-               FUNCTION_ADDR(target), arg0, arg1, arg2, arg3);
-        if (!stack_aligned) {
-          PrintF(" with unaligned stack %08x\n", get_register(sp));
+                  reinterpret_cast<SimulatorRuntimeFPCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        double dval0, dval1;
+        int32_t ival;
+        switch (redirection->type()) {
+          case ExternalReference::BUILTIN_FP_FP_CALL:
+          case ExternalReference::BUILTIN_COMPARE_CALL:
+            GetFpArgs(&dval0, &dval1);
+            PrintF("Call to host function at %p with args %f, %f",
+                FUNCTION_ADDR(target), dval0, dval1);
+            break;
+          case ExternalReference::BUILTIN_FP_CALL:
+            GetFpArgs(&dval0);
+            PrintF("Call to host function at %p with arg %f",
+                FUNCTION_ADDR(target), dval0);
+            break;
+          case ExternalReference::BUILTIN_FP_INT_CALL:
+            GetFpArgs(&dval0, &ival);
+            PrintF("Call to host function at %p with args %f, %d",
+                FUNCTION_ADDR(target), dval0, ival);
+            break;
+          default:
+            UNREACHABLE();
+            break;
         }
-        PrintF("\n");
       }
       double result = target(arg0, arg1, arg2, arg3);
-      // fp result -> registers v0 and v1.
-      int32_t gpreg_pair[2];
-      memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
-      set_register(v0, gpreg_pair[0]);
-      set_register(v1, gpreg_pair[1]);
+      if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+        SetFpResult(result);
+      } else {
+        int32_t gpreg_pair[2];
+        memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
+        set_register(v0, gpreg_pair[0]);
+        set_register(v1, gpreg_pair[1]);
+      }
     } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
-      PrintF("Mips does not yet support ExternalReference::DIRECT_API_CALL\n");
-      ASSERT(redirection->type() != ExternalReference::DIRECT_API_CALL);
+      // See DirectCEntryStub::GenerateCall for explanation of register usage.
+      SimulatorRuntimeDirectApiCall target =
+                  reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08x\n",
+               FUNCTION_ADDR(target), arg1);
+      }
+      v8::Handle<v8::Value> result = target(arg1);
+      *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+      set_register(v0, arg0);
     } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
-      PrintF("Mips does not support ExternalReference::DIRECT_GETTER_CALL\n");
-      ASSERT(redirection->type() != ExternalReference::DIRECT_GETTER_CALL);
+      // See DirectCEntryStub::GenerateCall for explanation of register usage.
+      SimulatorRuntimeDirectGetterCall target =
+                  reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08x %08x\n",
+               FUNCTION_ADDR(target), arg1, arg2);
+      }
+      v8::Handle<v8::Value> result = target(arg1, arg2);
+      *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+      set_register(v0, arg0);
     } else {
-      // Builtin call.
-      ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
       SimulatorRuntimeCall target =
-          reinterpret_cast<SimulatorRuntimeCall>(external);
-      if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+                  reinterpret_cast<SimulatorRuntimeCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
         PrintF(
-            "Call to host function at %p: %08x, %08x, %08x, %08x, %08x, %08x",
+            "Call to host function at %p "
+            "args %08x, %08x, %08x, %08x, %08x, %08x\n",
             FUNCTION_ADDR(target),
             arg0,
             arg1,
@@ -1247,12 +1429,7 @@
             arg3,
             arg4,
             arg5);
-        if (!stack_aligned) {
-          PrintF(" with unaligned stack %08x\n", get_register(sp));
-        }
-        PrintF("\n");
       }
-
       int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
       set_register(v0, static_cast<int32_t>(result));
       set_register(v1, static_cast<int32_t>(result >> 32));
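Both return paths above follow the same 64-bit convention: the low word goes
to v0 and the high word to v1. A sketch with an arbitrary value:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t result = 0x1122334455667788LL;
      int32_t v0 = static_cast<int32_t>(result);        // Low word.
      int32_t v1 = static_cast<int32_t>(result >> 32);  // High word.
      // Prints v0=0x55667788 v1=0x11223344.
      printf("v0=0x%08x v1=0x%08x\n", static_cast<uint32_t>(v0),
             static_cast<uint32_t>(v1));
      return 0;
    }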
@@ -1263,8 +1440,8 @@
     set_register(ra, saved_ra);
     set_pc(get_register(ra));
 
-  } else if (func == BREAK && code >= 0 && code < 16) {
-    // First 16 break_ codes interpreted as debug markers.
+  } else if (func == BREAK && code >= 0 && code < 32) {
+    // The first 32 break_ codes are interpreted as debug markers/watchpoints.
     MipsDebugger dbg(this);
     ++break_count_;
     PrintF("\n---- break %d marker: %3d  (instr count: %8d) ----------"
@@ -1314,9 +1491,9 @@
   const int32_t  fs_reg = instr->FsValue();
 
 
-  // ---------- Configuration
+  // ---------- Configuration.
   switch (op) {
-    case COP1:    // Coprocessor instructions
+    case COP1:    // Coprocessor instructions.
       switch (instr->RsFieldRaw()) {
         case BC1:   // Handled in DecodeTypeImmed, should never come here.
           UNREACHABLE();
@@ -1365,7 +1542,7 @@
           } else {
             // Logical right-rotate of a word by a fixed number of bits.
             // This is a special case of the SRL instruction, added in
             // MIPS32 Release 2.
-            // RS field is equal to 00001
+            // RS field is equal to 00001.
             alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
           }
           break;
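The ROTR emulation above is the usual two-shift rotate; a standalone sketch
(note the (32 - sa) shift is only well-defined for sa in 1..31):

    #include <cstdint>
    #include <cstdio>

    // Rotate a 32-bit word right by sa bits, as in the SRL special case.
    uint32_t RotateRight(uint32_t rt_u, uint32_t sa) {
      return (rt_u >> sa) | (rt_u << (32 - sa));
    }

    int main() {
      printf("0x%08x\n", RotateRight(0x80000001u, 1));  // 0xc0000000
      return 0;
    }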
@@ -1383,7 +1560,7 @@
           } else {
             // Logical right-rotate of a word by a variable number of bits.
             // This is special case od SRLV instruction, added in MIPS32
-            // Release 2. SA field is equal to 00001
+            // Release 2. SA field is equal to 00001.
             alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
           }
           break;
@@ -1402,10 +1579,6 @@
         case MULTU:
           u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
           break;
-        case DIV:
-        case DIVU:
-            exceptions[kDivideByZero] = rt == 0;
-          break;
         case ADD:
           if (HaveSameSign(rs, rt)) {
             if (rs > 0) {
@@ -1450,7 +1623,7 @@
         case SLTU:
           alu_out = rs_u < rt_u ? 1 : 0;
           break;
-        // Break and trap instructions
+        // Break and trap instructions.
         case BREAK:
 
           do_interrupt = true;
@@ -1478,6 +1651,10 @@
         case MOVCI:
           // No action taken on decode.
           break;
+        case DIV:
+        case DIVU:
+          // div and divu never raise exceptions.
+          break;
         default:
           UNREACHABLE();
       };
@@ -1497,7 +1674,7 @@
     case SPECIAL3:
       switch (instr->FunctionFieldRaw()) {
         case INS: {   // Mips32r2 instruction.
-          // Interpret Rd field as 5-bit msb of insert.
+          // Interpret rd field as 5-bit msb of insert.
           uint16_t msb = rd_reg;
           // Interpret sa field as 5-bit lsb of insert.
           uint16_t lsb = sa;
@@ -1507,7 +1684,7 @@
           break;
         }
         case EXT: {   // Mips32r2 instruction.
-          // Interpret Rd field as 5-bit msb of extract.
+          // Interpret rd field as 5-bit msb of extract.
           uint16_t msb = rd_reg;
           // Interpret sa field as 5-bit lsb of extract.
           uint16_t lsb = sa;
@@ -1543,7 +1720,7 @@
   int64_t  i64hilo = 0;
   uint64_t u64hilo = 0;
 
-  // ALU output
+  // ALU output.
   // It should not be used as is. Instructions using it should always
   // initialize it first.
   int32_t alu_out = 0x12345678;
@@ -1551,7 +1728,7 @@
   // For break and trap instructions.
   bool do_interrupt = false;
 
-  // For jr and jalr
+  // For jr and jalr.
   // Get current pc.
   int32_t current_pc = get_pc();
   // Next pc.
@@ -1568,11 +1745,11 @@
   // ---------- Raise exceptions triggered.
   SignalExceptions();
 
-  // ---------- Execution
+  // ---------- Execution.
   switch (op) {
     case COP1:
       switch (instr->RsFieldRaw()) {
-        case BC1:   // branch on coprocessor condition
+        case BC1:   // Branch on coprocessor condition.
           UNREACHABLE();
           break;
         case CFC1:
@@ -1802,7 +1979,7 @@
           Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
               current_pc+Instruction::kInstrSize);
           BranchDelayInstructionDecode(branch_delay_instr);
-          set_register(31, current_pc + 2* Instruction::kInstrSize);
+          set_register(31, current_pc + 2 * Instruction::kInstrSize);
           set_pc(next_pc);
           pc_modified_ = true;
           break;
@@ -1817,13 +1994,19 @@
           set_register(HI, static_cast<int32_t>(u64hilo >> 32));
           break;
         case DIV:
-          // Divide by zero was checked in the configuration step.
-          set_register(LO, rs / rt);
-          set_register(HI, rs % rt);
+          // Divide by zero was not checked in the configuration step - div and
+          // divu do not raise exceptions. On division by 0, the result will
+          // be UNPREDICTABLE.
+          if (rt != 0) {
+            set_register(LO, rs / rt);
+            set_register(HI, rs % rt);
+          }
           break;
         case DIVU:
-          set_register(LO, rs_u / rt_u);
-          set_register(HI, rs_u % rt_u);
+          if (rt_u != 0) {
+            set_register(LO, rs_u / rt_u);
+            set_register(HI, rs_u % rt_u);
+          }
           break;
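A scalar sketch of the guarded semantics above: MIPS div/divu never trap, and
a zero divisor simply leaves HI and LO unpredictable, which the simulator
models by skipping the update:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t rs = 7, rt = 3;
      int32_t lo = 0, hi = 0;  // Simulated LO/HI.
      if (rt != 0) {           // A zero divisor leaves LO/HI untouched.
        lo = rs / rt;          // Quotient.
        hi = rs % rt;          // Remainder.
      }
      printf("LO=%d HI=%d\n", lo, hi);  // LO=2 HI=1
      return 0;
    }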
         // Break and trap instructions.
         case BREAK:
@@ -1842,9 +2025,9 @@
           if (rt) set_register(rd_reg, rs);
           break;
         case MOVCI: {
-          uint32_t cc = instr->FCccValue();
+          uint32_t cc = instr->FBccValue();
           uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
-          if (instr->Bit(16)) {  // Read Tf bit
+          if (instr->Bit(16)) {  // Read Tf bit.
             if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
           } else {
             if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
@@ -1893,17 +2076,17 @@
 }
 
 
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq)
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
 void Simulator::DecodeTypeImmediate(Instruction* instr) {
   // Instruction fields.
   Opcode   op     = instr->OpcodeFieldRaw();
   int32_t  rs     = get_register(instr->RsValue());
   uint32_t rs_u   = static_cast<uint32_t>(rs);
-  int32_t  rt_reg = instr->RtValue();  // destination register
+  int32_t  rt_reg = instr->RtValue();  // Destination register.
   int32_t  rt     = get_register(rt_reg);
   int16_t  imm16  = instr->Imm16Value();
 
-  int32_t  ft_reg = instr->FtValue();  // destination register
+  int32_t  ft_reg = instr->FtValue();  // Destination register.
 
   // Zero extended immediate.
   uint32_t  oe_imm16 = 0xffff & imm16;
@@ -1927,10 +2110,10 @@
 
   // Used for memory instructions.
   int32_t addr = 0x0;
-  // Value to be written in memory
+  // Value to be written in memory.
   uint32_t mem_value = 0x0;
 
-  // ---------- Configuration (and execution for REGIMM)
+  // ---------- Configuration (and execution for REGIMM).
   switch (op) {
     // ------------- COP1. Coprocessor instructions.
     case COP1:
@@ -1941,7 +2124,7 @@
           cc_value = test_fcsr_bit(fcsr_cc);
           do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
           execute_branch_delay_instruction = true;
-          // Set next_pc
+          // Set next_pc.
           if (do_branch) {
             next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
           } else {
@@ -1952,7 +2135,7 @@
           UNREACHABLE();
       };
       break;
-    // ------------- REGIMM class
+    // ------------- REGIMM class.
     case REGIMM:
       switch (instr->RtFieldRaw()) {
         case BLTZ:
@@ -1977,7 +2160,7 @@
         case BGEZAL:
           // Branch instructions common part.
           execute_branch_delay_instruction = true;
-          // Set next_pc
+          // Set next_pc.
           if (do_branch) {
             next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
             if (instr->IsLinkingInstruction()) {
@@ -1989,8 +2172,8 @@
         default:
           break;
         };
-    break;  // case REGIMM
-    // ------------- Branch instructions
+    break;  // case REGIMM.
+    // ------------- Branch instructions.
     // When comparing to zero, the encoding of rt field is always 0, so we don't
     // need to replace rt with zero.
     case BEQ:
@@ -2005,7 +2188,7 @@
     case BGTZ:
       do_branch = rs  > 0;
       break;
-    // ------------- Arithmetic instructions
+    // ------------- Arithmetic instructions.
     case ADDI:
       if (HaveSameSign(rs, se_imm16)) {
         if (rs > 0) {
@@ -2038,7 +2221,7 @@
     case LUI:
         alu_out = (oe_imm16 << 16);
       break;
-    // ------------- Memory instructions
+    // ------------- Memory instructions.
     case LB:
       addr = rs + se_imm16;
       alu_out = ReadB(addr);
@@ -2048,7 +2231,7 @@
       alu_out = ReadH(addr, instr);
       break;
     case LWL: {
-      // al_offset is an offset of the effective address within an aligned word
+      // al_offset is the offset of the effective address in an aligned word.
       uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
       uint8_t byte_shift = kPointerAlignmentMask - al_offset;
       uint32_t mask = (1 << byte_shift * 8) - 1;
@@ -2071,7 +2254,7 @@
       alu_out = ReadHU(addr, instr);
       break;
     case LWR: {
-      // al_offset is an offset of the effective address within an aligned word
+      // al_offset is the offset of the effective address in an aligned word.
       uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
       uint8_t byte_shift = kPointerAlignmentMask - al_offset;
       uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
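The LWL/LWR masking above is easiest to check with concrete numbers; a sketch
for one unaligned address (kPointerAlignmentMask is 3 for 32-bit words, and
~0u is used here instead of ~0 to keep the shift well-defined):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kPointerAlignmentMask = 3;
      uint32_t addr = 0x1002;                                  // Effective address.
      uint8_t al_offset = addr & kPointerAlignmentMask;        // 2
      uint8_t byte_shift = kPointerAlignmentMask - al_offset;  // 1
      uint32_t lwl_mask = (1 << byte_shift * 8) - 1;           // 0x000000ff
      uint32_t lwr_mask =
          al_offset ? (~0u << (byte_shift + 1) * 8) : 0;       // 0xffff0000
      printf("al_offset=%d byte_shift=%d lwl=0x%08x lwr=0x%08x\n",
             al_offset, byte_shift, lwl_mask, lwr_mask);
      return 0;
    }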
@@ -2126,16 +2309,16 @@
   // ---------- Raise exceptions triggered.
   SignalExceptions();
 
-  // ---------- Execution
+  // ---------- Execution.
   switch (op) {
-    // ------------- Branch instructions
+    // ------------- Branch instructions.
     case BEQ:
     case BNE:
     case BLEZ:
     case BGTZ:
       // Branch instructions common part.
       execute_branch_delay_instruction = true;
-      // Set next_pc
+      // Set next_pc.
       if (do_branch) {
         next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
         if (instr->IsLinkingInstruction()) {
@@ -2145,7 +2328,7 @@
         next_pc = current_pc + 2 * Instruction::kInstrSize;
       }
       break;
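All the branch cases above share this target arithmetic: the 16-bit immediate
is a signed word offset applied from the delay-slot instruction (kInstrSize is
4), and a not-taken branch steps over the delay slot as well. A sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32_t kInstrSize = 4;
      int32_t current_pc = 0x1000;
      int16_t imm16 = -2;  // Branch back two instructions.
      int32_t taken = current_pc + (imm16 << 2) + kInstrSize;
      int32_t not_taken = current_pc + 2 * kInstrSize;
      // Prints taken=0x00000ffc not_taken=0x00001008.
      printf("taken=0x%08x not_taken=0x%08x\n",
             static_cast<uint32_t>(taken), static_cast<uint32_t>(not_taken));
      return 0;
    }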
-    // ------------- Arithmetic instructions
+    // ------------- Arithmetic instructions.
     case ADDI:
     case ADDIU:
     case SLTI:
@@ -2156,7 +2339,7 @@
     case LUI:
       set_register(rt_reg, alu_out);
       break;
-    // ------------- Memory instructions
+    // ------------- Memory instructions.
     case LB:
     case LH:
     case LWL:
@@ -2216,26 +2399,26 @@
 }
 
 
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal)
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
 void Simulator::DecodeTypeJump(Instruction* instr) {
   // Get current pc.
   int32_t current_pc = get_pc();
   // Get unchanged bits of pc.
   int32_t pc_high_bits = current_pc & 0xf0000000;
-  // Next pc
+  // Next pc.
   int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
 
-  // Execute branch delay slot
+  // Execute branch delay slot.
   // We don't check for end_sim_pc. First it should not be met as the current pc
   // is valid. Secondly a jump should always execute its branch delay slot.
   Instruction* branch_delay_instr =
-    reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
+      reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
   BranchDelayInstructionDecode(branch_delay_instr);
 
   // Update pc and ra if necessary.
   // Do this after the branch delay execution.
   if (instr->IsLinkingInstruction()) {
-    set_register(31, current_pc + 2* Instruction::kInstrSize);
+    set_register(31, current_pc + 2 * Instruction::kInstrSize);
   }
   set_pc(next_pc);
   pc_modified_ = true;
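The J-type counterpart keeps the upper four PC bits and splices in the 26-bit
word index, so jumps cannot leave the current 256MB region. A sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t current_pc = 0x40001000;
      int32_t imm26 = 0x123;                           // 26-bit instruction index.
      int32_t pc_high_bits = current_pc & 0xf0000000;
      int32_t next_pc = pc_high_bits | (imm26 << 2);
      printf("next_pc=0x%08x\n", static_cast<uint32_t>(next_pc));  // 0x4000048c
      return 0;
    }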
@@ -2251,11 +2434,11 @@
   if (::v8::internal::FLAG_trace_sim) {
     disasm::NameConverter converter;
     disasm::Disassembler dasm(converter);
-    // use a reasonably large buffer
+    // Use a reasonably large buffer.
     v8::internal::EmbeddedVector<char, 256> buffer;
-    dasm.InstructionDecode(buffer, reinterpret_cast<byte_*>(instr));
+    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
     PrintF("  0x%08x  %s\n", reinterpret_cast<intptr_t>(instr),
-           buffer.start());
+        buffer.start());
   }
 
   switch (instr->InstructionType()) {
@@ -2310,10 +2493,10 @@
 }
 
 
-int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
   va_list parameters;
   va_start(parameters, argument_count);
-  // Setup arguments
+  // Set up arguments.
 
   // First four arguments passed in registers.
   ASSERT(argument_count >= 4);
@@ -2338,7 +2521,7 @@
   va_end(parameters);
   set_register(sp, entry_stack);
 
-  // Prepare to execute the code at entry
+  // Prepare to execute the code at entry.
   set_register(pc, reinterpret_cast<int32_t>(entry));
   // Put down marker for end of simulation. The simulator will stop simulation
   // when the PC reaches this value. By saving the "end simulation" value into
@@ -2374,7 +2557,7 @@
   set_register(gp, callee_saved_value);
   set_register(fp, callee_saved_value);
 
-  // Start the simulation
+  // Start the simulation.
   Execute();
 
   // Check that the callee-saved registers have been preserved.
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 0cd9bbe..21476dc 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,14 +50,15 @@
   entry(p0, p1, p2, p3, p4)
 
 typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
-                                  void*, int*, Address, int, Isolate*);
+                                   void*, int*, Address, int, Isolate*);
+
 
 // Call the generated regexp code directly. The code at the entry address
 // should act as a function matching the type arm_regexp_matcher.
 // The fifth argument is a dummy that reserves the space used for
 // the return address added by the ExitFrame in native calls.
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  (FUNCTION_CAST<mips_regexp_matcher>(entry)(                             \
+  (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
       p0, p1, p2, p3, NULL, p4, p5, p6, p7))
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@@ -68,7 +69,8 @@
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
     return c_limit;
   }
 
@@ -95,6 +97,7 @@
 // Running with a simulator.
 
 #include "hashmap.h"
+#include "assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -151,7 +154,7 @@
     sp,
     s8,
     ra,
-    // LO, HI, and pc
+    // LO, HI, and pc.
     LO,
     HI,
     pc,   // pc must be the last register.
@@ -164,13 +167,13 @@
   // Generated code will always use doubles. So we will only use even registers.
   enum FPURegister {
     f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+    f12, f13, f14, f15,   // f12 and f14 are argument FPU registers.
+    f12, f13, f14, f15,   // f12 and f14 are arguments FPURegisters.
     f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
     f26, f27, f28, f29, f30, f31,
     kNumFPURegisters
   };
 
-  Simulator();
+  explicit Simulator(Isolate* isolate);
   ~Simulator();
 
   // The currently executing Simulator instance. Potentially there can be one
@@ -182,7 +185,7 @@
   // instruction.
   void set_register(int reg, int32_t value);
   int32_t get_register(int reg) const;
-  // Same for FPURegisters
+  // Same for FPURegisters.
   void set_fpu_register(int fpureg, int32_t value);
   void set_fpu_register_float(int fpureg, float value);
   void set_fpu_register_double(int fpureg, double value);
@@ -205,7 +208,7 @@
   void Execute();
 
   // Call on program start.
-  static void Initialize();
+  static void Initialize(Isolate* isolate);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -304,7 +307,6 @@
                            int size);
   static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
 
-
   enum Exception {
     none,
     kIntegerOverflow,
@@ -321,9 +323,12 @@
   static void* RedirectExternalReference(void* external_function,
                                          ExternalReference::Type type);
 
-  // Used for real time calls that takes two double values as arguments and
-  // returns a double.
-  void SetFpResult(double result);
+  // For use in calls that take double value arguments.
+  void GetFpArgs(double* x, double* y);
+  void GetFpArgs(double* x);
+  void GetFpArgs(double* x, int32_t* y);
+  void SetFpResult(const double& result);
+
 
   // Architecture state.
   // Registers.
@@ -334,35 +339,36 @@
   uint32_t FCSR_;
 
   // Simulator support.
+  // Allocate 1MB for stack.
+  static const size_t stack_size_ = 1 * 1024 * 1024;
   char* stack_;
-  size_t stack_size_;
   bool pc_modified_;
   int icount_;
   int break_count_;
 
-  // Icache simulation
+  // Icache simulation.
   v8::internal::HashMap* i_cache_;
 
+  v8::internal::Isolate* isolate_;
+
   // Registered breakpoints.
   Instruction* break_pc_;
   Instr break_instr_;
-
-  v8::internal::Isolate* isolate_;
 };
 
 
 // When running with the simulator transition into simulated execution at this
 // point.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+    reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
 
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  Simulator::current(Isolate::Current())->Call( \
-      entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+    Simulator::current(Isolate::Current())->Call( \
+        entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
 
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
-  try_catch_address == NULL ? \
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address)                              \
+  try_catch_address == NULL ?                                                  \
       NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
 
 
@@ -373,8 +379,9 @@
 // trouble down the line.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
-    return Simulator::current(Isolate::Current())->StackLimit();
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
+    return Simulator::current(isolate)->StackLimit();
   }
 
   static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 1a49558..47428a8 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -39,6 +39,124 @@
 #define __ ACCESS_MASM(masm)
 
 
+static void ProbeTable(Isolate* isolate,
+                       MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset,
+                       Register scratch,
+                       Register scratch2) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+
+  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+
+  // Check the relative positions of the address fields.
+  ASSERT(value_off_addr > key_off_addr);
+  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
+  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register offsets_base_addr = scratch;
+
+  // Check that the key in the entry matches the name.
+  __ li(offsets_base_addr, Operand(key_offset));
+  __ sll(scratch2, offset, 1);
+  __ addu(scratch2, offsets_base_addr, scratch2);
+  __ lw(scratch2, MemOperand(scratch2));
+  __ Branch(&miss, ne, name, Operand(scratch2));
+
+  // Get the code entry from the cache.
+  __ Addu(offsets_base_addr, offsets_base_addr,
+          Operand(value_off_addr - key_off_addr));
+  __ sll(scratch2, offset, 1);
+  __ addu(scratch2, offsets_base_addr, scratch2);
+  __ lw(scratch2, MemOperand(scratch2));
+
+  // Check that the flags match what we're looking for.
+  __ lw(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
+  __ And(scratch2, scratch2, Operand(~Code::kFlagsNotUsedInLookup));
+  __ Branch(&miss, ne, scratch2, Operand(flags));
+
+  // Re-load code entry from cache.
+  __ sll(offset, offset, 1);
+  __ addu(offset, offset, offsets_base_addr);
+  __ lw(offset, MemOperand(offset));
+
+  // Jump to the first instruction in the code stub.
+  __ Addu(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(offset);
+
+  // Miss: fall through.
+  __ bind(&miss);
+}
+
+
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register scratch0,
+    Register scratch1) {
+  ASSERT(name->IsSymbol());
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ Branch(miss_label, ne, at, Operand(zero_reg));
+
+  // Check that receiver is a JSObject.
+  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  Register tmp = properties;
+  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+  __ Branch(miss_label, ne, map, Operand(tmp));
+
+  // Restore the temporarily used register.
+  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      receiver,
+      properties,
+      name,
+      scratch1);
+  if (result->IsFailure()) return result;
+
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  return result;
+}
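The interceptor/access-check bail-out in the lookup above is a plain bit-field
test; a sketch with assumed bit positions (the real ones are defined on Map):

    #include <cstdio>

    int main() {
      const int kHasNamedInterceptor = 3;  // Assumed positions, illustration only.
      const int kIsAccessCheckNeeded = 4;
      const int mask =
          (1 << kHasNamedInterceptor) | (1 << kIsAccessCheckNeeded);
      int bit_field = 1 << kIsAccessCheckNeeded;  // Pretend map bit field.
      printf("%s\n", (bit_field & mask) ? "miss" : "continue");  // miss
      return 0;
    }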
+
+
 void StubCache::GenerateProbe(MacroAssembler* masm,
                               Code::Flags flags,
                               Register receiver,
@@ -46,20 +164,96 @@
                               Register scratch,
                               Register extra,
                               Register extra2) {
-  UNIMPLEMENTED_MIPS();
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 8.
+  ASSERT(sizeof(Entry) == 8);
+
+  // Make sure the flags do not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+  ASSERT(!extra.is(receiver));
+  ASSERT(!extra.is(name));
+  ASSERT(!extra.is(scratch));
+  ASSERT(!extra2.is(receiver));
+  ASSERT(!extra2.is(name));
+  ASSERT(!extra2.is(scratch));
+  ASSERT(!extra2.is(extra));
+
+  // Check scratch, extra and extra2 registers are valid.
+  ASSERT(!scratch.is(no_reg));
+  ASSERT(!extra.is(no_reg));
+  ASSERT(!extra2.is(no_reg));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss, t0);
+
+  // Get the map of the receiver and compute the hash.
+  __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
+  __ lw(t8, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Addu(scratch, scratch, Operand(t8));
+  __ Xor(scratch, scratch, Operand(flags));
+  __ And(scratch,
+         scratch,
+         Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ Subu(scratch, scratch, Operand(name));
+  __ Addu(scratch, scratch, Operand(flags));
+  __ And(scratch,
+         scratch,
+         Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
 }
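A scalar sketch of the two probe hashes computed above. All constants and
addresses are invented for illustration; the real table sizes and tag width
come from StubCache and the heap tagging scheme.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kPrimaryTableSize = 2048;  // Assumed power-of-two sizes.
      const uint32_t kSecondaryTableSize = 512;
      const uint32_t kHeapObjectTagSize = 2;    // Assumed tag width.
      uint32_t name_hash = 0x12345678;          // From String::kHashFieldOffset.
      uint32_t map = 0xbeef0000;                // Receiver map address.
      uint32_t name = 0x00001000;               // Symbol address.
      uint32_t flags = 0xc0de;

      uint32_t primary = ((name_hash + map) ^ flags) &
                         ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
      uint32_t secondary = (primary - name + flags) &
                           ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
      printf("primary=0x%x secondary=0x%x\n", primary, secondary);
      return 0;
    }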
 
 
 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                        int index,
                                                        Register prototype) {
-  UNIMPLEMENTED_MIPS();
+  // Load the global or builtins object from the current context.
+  __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  __ lw(prototype,
+        FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
+  // Load the initial map.  The global functions all have initial maps.
+  __ lw(prototype,
+        FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
 }
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
     MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  Isolate* isolate = masm->isolate();
+  // Check we're still in the same context.
+  __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  ASSERT(!prototype.is(at));
+  __ li(at, isolate->global());
+  __ Branch(miss, ne, prototype, Operand(at));
+  // Get the global function with the given index.
+  JSFunction* function =
+      JSFunction::cast(isolate->global_context()->get(index));
+  // Load its initial map. The global functions all have initial maps.
+  __ li(prototype, Handle<Map>(function->initial_map()));
+  // Load the prototype from the initial map.
+  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
 }
 
 
@@ -69,7 +263,18 @@
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                             Register dst, Register src,
                                             JSObject* holder, int index) {
-  UNIMPLEMENTED_MIPS();
+  // Adjust for the number of properties stored in the holder.
+  index -= holder->map()->inobject_properties();
+  if (index < 0) {
+    // Get the property straight out of the holder.
+    int offset = holder->map()->instance_size() + (index * kPointerSize);
+    __ lw(dst, FieldMemOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+    __ lw(dst, FieldMemOperand(dst, offset));
+  }
 }
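The field-index math above in scalar form: after subtracting the in-object
property count, a negative index addresses storage inside the instance and a
non-negative one the out-of-line properties array. Sizes are assumed here:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      const int kFixedArrayHeaderSize = 8;  // Assumed FixedArray::kHeaderSize.
      int instance_size = 32;
      int inobject_properties = 4;

      int index = 2 - inobject_properties;  // Raw index 2 -> -2: in-object.
      if (index < 0) {
        printf("in-object offset %d\n", instance_size + index * kPointerSize);
      } else {
        printf("properties offset %d\n",
               index * kPointerSize + kFixedArrayHeaderSize);
      }
      return 0;
    }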
 
 
@@ -77,7 +282,41 @@
                                            Register receiver,
                                            Register scratch,
                                            Label* miss_label) {
-  UNIMPLEMENTED_MIPS();
+  // Check that the receiver isn't a smi.
+  __ And(scratch, receiver, Operand(kSmiTagMask));
+  __ Branch(miss_label, eq, scratch, Operand(zero_reg));
+
+  // Check that the object is a JS array.
+  __ GetObjectType(receiver, scratch, scratch);
+  __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Load length directly from the JS array.
+  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Ret();
+}
+
+
+// Generate code to check if an object is a string.  If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+// If this is not needed, scratch1 and scratch2 may be the same register.
+static void GenerateStringCheck(MacroAssembler* masm,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Label* smi,
+                                Label* non_string_object) {
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, smi, t0);
+
+  // Check that the object is a string.
+  __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ And(scratch2, scratch1, Operand(kIsNotStringMask));
+  // The cast is to resolve the overload for the argument of 0x0.
+  __ Branch(non_string_object,
+            ne,
+            scratch2,
+            Operand(static_cast<int32_t>(kStringTag)));
 }
 
 
@@ -91,7 +330,28 @@
                                             Register scratch2,
                                             Label* miss,
                                             bool support_wrappers) {
-  UNIMPLEMENTED_MIPS();
+  Label check_wrapper;
+
+  // Check if the object is a string leaving the instance type in the
+  // scratch1 register.
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+                      support_wrappers ? &check_wrapper : miss);
+
+  // Load length directly from the string.
+  __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
+  __ Ret();
+
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+
+    // Unwrap the value and check if the wrapped value is a string.
+    __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+    __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
+    __ Ret();
+  }
 }
 
 
@@ -100,7 +360,9 @@
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* miss_label) {
-  UNIMPLEMENTED_MIPS();
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(v0, scratch1);
+  __ Ret();
 }
 
 
@@ -115,15 +377,254 @@
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
-  UNIMPLEMENTED_MIPS();
+  // a0 : value.
+  Label exit;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver_reg, miss_label, scratch);
+
+  // Check that the map of the receiver hasn't changed.
+  __ lw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  __ Branch(miss_label, ne, scratch, Operand(Handle<Map>(object->map())));
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ push(receiver_reg);
+    __ li(a2, Operand(Handle<Map>(transition)));
+    __ Push(a2, a0);
+    __ TailCallExternalReference(
+           ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                             masm->isolate()),
+           3, 1);
+    return;
+  }
+
+  if (transition != NULL) {
+    // Update the map of the object; no write barrier updating is
+    // needed because the map is never in new space.
+    __ li(t0, Operand(Handle<Map>(transition)));
+    __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  }
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= object->map()->inobject_properties();
+
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ sw(a0, FieldMemOperand(receiver_reg, offset));
+
+    // Skip updating write barrier if storing a smi.
+    __ JumpIfSmi(a0, &exit, scratch);
+
+    // Update the write barrier for the array address.
+    // Pass the now unused name_reg as a scratch register.
+    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array.
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ sw(a0, FieldMemOperand(scratch, offset));
+
+    // Skip updating write barrier if storing a smi.
+    __ JumpIfSmi(a0, &exit);
+
+    // Update the write barrier for the array address.
+    // Ok to clobber receiver_reg and name_reg, since we return.
+    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+  }
+
+  // Return the value (register v0).
+  __ bind(&exit);
+  __ mov(v0, a0);
+  __ Ret();
 }
 
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
+  } else {
+    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
 
 
+static void GenerateCallFunction(MacroAssembler* masm,
+                                 Object* object,
+                                 const ParameterCount& arguments,
+                                 Label* miss) {
+  // ----------- S t a t e -------------
+  //  -- a0: receiver
+  //  -- a1: function to call
+  // -----------------------------------
+  // Check that the function really is a function.
+  __ JumpIfSmi(a1, miss);
+  __ GetObjectType(a1, a3, a3);
+  __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
+  }
+
+  // Invoke the function.
+  __ InvokeFunction(a1, arguments, JUMP_FUNCTION);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+                                     Register receiver,
+                                     Register holder,
+                                     Register name,
+                                     JSObject* holder_obj) {
+  __ push(name);
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Register scratch = name;
+  __ li(scratch, Operand(Handle<Object>(interceptor)));
+  __ Push(scratch, receiver, holder);
+  __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
+  __ push(scratch);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Register name,
+                                                   JSObject* holder_obj) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
+          masm->isolate());
+  __ li(a0, Operand(5));
+  __ li(a1, Operand(ref));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+}
+
+
+static const int kFastApiCallArguments = 3;
+
+
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
+                                       Register scratch) {
+  ASSERT(Smi::FromInt(0) == 0);
+  for (int i = 0; i < kFastApiCallArguments; i++) {
+    __ push(zero_reg);
+  }
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
+  __ Drop(kFastApiCallArguments);
+}
+
+
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+                                      const CallOptimization& optimization,
+                                      int argc) {
+  // ----------- S t a t e -------------
+  //  -- sp[0]              : holder (set by CheckPrototypes)
+  //  -- sp[4]              : callee js function
+  //  -- sp[8]              : call data
+  //  -- sp[12]             : last js argument
+  //  -- ...
+  //  -- sp[(argc + 3) * 4] : first js argument
+  //  -- sp[(argc + 4) * 4] : receiver
+  // -----------------------------------
+  // Get the function and setup the context.
+  JSFunction* function = optimization.constant_function();
+  __ li(t1, Operand(Handle<JSFunction>(function)));
+  __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
+
+  // Pass the additional arguments FastHandleApiCall expects.
+  Object* call_data = optimization.api_call_info()->data();
+  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+  if (masm->isolate()->heap()->InNewSpace(call_data)) {
+    __ li(a0, api_call_info_handle);
+    __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
+  } else {
+    __ li(t2, Operand(Handle<Object>(call_data)));
+  }
+
+  // Store js function and call data.
+  __ sw(t1, MemOperand(sp, 1 * kPointerSize));
+  __ sw(t2, MemOperand(sp, 2 * kPointerSize));
+
+  // a2 points to call data as expected by Arguments
+  // (refer to layout above).
+  __ Addu(a2, sp, Operand(2 * kPointerSize));
+
+  Object* callback = optimization.api_call_info()->callback();
+  Address api_function_address = v8::ToCData<Address>(callback);
+  ApiFunction fun(api_function_address);
+
+  const int kApiStackSpace = 4;
+
+  __ EnterExitFrame(false, kApiStackSpace);
+
+  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+  // struct from the function (which is currently the case). This means we pass
+  // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
+  // will handle setting up a0.
+
+  // a1 = v8::Arguments&
+  // Arguments is built at sp + 1 (sp is a reserved spot for ra).
+  __ Addu(a1, sp, kPointerSize);
+
+  // v8::Arguments::implicit_args = data
+  __ sw(a2, MemOperand(a1, 0 * kPointerSize));
+  // v8::Arguments::values = last argument
+  __ Addu(t0, a2, Operand(argc * kPointerSize));
+  __ sw(t0, MemOperand(a1, 1 * kPointerSize));
+  // v8::Arguments::length_ = argc
+  __ li(t0, Operand(argc));
+  __ sw(t0, MemOperand(a1, 2 * kPointerSize));
+  // v8::Arguments::is_construct_call = 0
+  __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
+
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated). Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+  ExternalReference ref =
+      ExternalReference(&fun,
+                        ExternalReference::DIRECT_API_CALL,
+                        masm->isolate());
+  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+}
+
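A sketch of the four-word v8::Arguments block that the sw sequence above
builds at a1 (same field order; the slot address and argc are invented):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      int argc = 2;
      int32_t call_data_slot = 0x2000;  // Pretend a2: address of the call data.
      int32_t block[4];
      block[0] = call_data_slot;                        // implicit_args.
      block[1] = call_data_slot + argc * kPointerSize;  // values = last argument.
      block[2] = argc;                                  // length_.
      block[3] = 0;                                     // is_construct_call.
      for (int i = 0; i < 4; ++i) {
        printf("a1[%d] = 0x%x\n", i, static_cast<uint32_t>(block[i]));
      }
      return 0;
    }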
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -133,32 +634,150 @@
         arguments_(arguments),
         name_(name) {}
 
-  void Compile(MacroAssembler* masm,
-               JSObject* object,
-               JSObject* holder,
-               String* name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss) {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  void CompileCacheable(MacroAssembler* masm,
+  MaybeObject* Compile(MacroAssembler* masm,
                        JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
                        Register scratch3,
-                       JSObject* interceptor_holder,
-                       LookupResult* lookup,
-                       String* name,
-                       const CallOptimization& optimization,
-                       Label* miss_label) {
-    UNIMPLEMENTED_MIPS();
+                       Label* miss) {
+    ASSERT(holder->HasNamedInterceptor());
+    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+    // Check that the receiver isn't a smi.
+    __ JumpIfSmi(receiver, miss);
+
+    CallOptimization optimization(lookup);
+
+    if (optimization.is_constant_call()) {
+      return CompileCacheable(masm,
+                              object,
+                              receiver,
+                              scratch1,
+                              scratch2,
+                              scratch3,
+                              holder,
+                              lookup,
+                              name,
+                              optimization,
+                              miss);
+    } else {
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     scratch3,
+                     name,
+                     holder,
+                     miss);
+      return masm->isolate()->heap()->undefined_value();
+    }
+  }
+
+ private:
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
+    ASSERT(optimization.is_constant_call());
+    ASSERT(!lookup->holder()->IsGlobalObject());
+
+    Counters* counters = masm->isolate()->counters();
+
+    int depth1 = kInvalidProtoDepth;
+    int depth2 = kInvalidProtoDepth;
+    bool can_do_fast_api_call = false;
+    if (optimization.is_simple_api_call() &&
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                      interceptor_holder);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                        lookup->holder());
+      }
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
+    }
+
+    __ IncrementCounter(counters->call_const_interceptor(), 1,
+                        scratch1, scratch2);
+
+    if (can_do_fast_api_call) {
+      __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
+                          scratch1, scratch2);
+      ReserveSpaceForFastApiCall(masm, scratch1);
+    }
+
+    // Check that the maps from the receiver to the interceptor's holder
+    // haven't changed, and thus we can invoke the interceptor.
+    Label miss_cleanup;
+    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver,
+                                        interceptor_holder, scratch1,
+                                        scratch2, scratch3, name, depth1, miss);
+
+    // Invoke an interceptor and if it provides a value,
+    // branch to |regular_invoke|.
+    Label regular_invoke;
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
+                        &regular_invoke);
+
+    // Interceptor returned nothing for this property.  Try to use cached
+    // constant function.
+
+    // Check that the maps from the interceptor's holder to the constant
+    // function's holder haven't changed, so the cached constant function
+    // can be used.
+    if (interceptor_holder != lookup->holder()) {
+      stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+                                      lookup->holder(), scratch1,
+                                      scratch2, scratch3, name, depth2, miss);
+    } else {
+      // CheckPrototypes has the side effect of fetching a 'holder' for the
+      // API call (an object that is instanceof the signature's expected
+      // type). It is safe to omit that here: if such a holder exists, it
+      // was already fetched by the previous CheckPrototypes call.
+      ASSERT(depth2 == kInvalidProtoDepth);
+    }
+
+    // Invoke function.
+    if (can_do_fast_api_call) {
+      MaybeObject* result = GenerateFastApiDirectCall(masm,
+                                                      optimization,
+                                                      arguments_.immediate());
+      if (result->IsFailure()) return result;
+    } else {
+      __ InvokeFunction(optimization.constant_function(), arguments_,
+                        JUMP_FUNCTION);
+    }
+
+    // Deferred code for the fast API call case: clean up preallocated space.
+    if (can_do_fast_api_call) {
+      __ bind(&miss_cleanup);
+      FreeSpaceForFastApiCall(masm);
+      __ Branch(miss_label);
+    }
+
+    // Invoke a regular function.
+    __ bind(&regular_invoke);
+    if (can_do_fast_api_call) {
+      FreeSpaceForFastApiCall(masm);
+    }
+
+    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
@@ -170,7 +789,31 @@
                       String* name,
                       JSObject* interceptor_holder,
                       Label* miss_label) {
-    UNIMPLEMENTED_MIPS();
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3, name,
+                                        miss_label);
+
+    // Call a runtime function to load the interceptor property.
+    __ EnterInternalFrame();
+    // Save the name_ register across the call.
+    __ push(name_);
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             interceptor_holder);
+
+    __ CallExternalReference(
+          ExternalReference(
+              IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
+              masm->isolate()),
+          5);
+
+    // Restore the name_ register.
+    __ pop(name_);
+    __ LeaveInternalFrame();
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
@@ -179,7 +822,23 @@
                            JSObject* holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    UNIMPLEMENTED_MIPS();
+    __ EnterInternalFrame();
+
+    __ Push(holder, name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder (popped into the receiver register).
+    __ LeaveInternalFrame();
+
+    // If the interceptor returned the no-result sentinel, fall through so
+    // the caller invokes the cached constant function.
+    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
   }
 
   StubCompiler* stub_compiler_;
@@ -188,6 +847,175 @@
 };
 
 
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+    MacroAssembler* masm,
+    GlobalObject* global,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  Object* probe;
+  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+  ASSERT(cell->value()->IsTheHole());
+  __ li(scratch, Operand(Handle<Object>(cell)));
+  __ lw(scratch,
+        FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  __ Branch(miss, ne, scratch, Operand(at));
+  return cell;
+}
+
+
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+    MacroAssembler* masm,
+    JSObject* object,
+    JSObject* holder,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  JSObject* current = object;
+  while (current != holder) {
+    if (current->IsGlobalObject()) {
+      // Returns a cell or a failure.
+      MaybeObject* result = GenerateCheckPropertyCell(
+          masm,
+          GlobalObject::cast(current),
+          name,
+          scratch,
+          miss);
+      if (result->IsFailure()) return result;
+    }
+    ASSERT(current->IsJSObject());
+    current = JSObject::cast(current->GetPrototype());
+  }
+  return NULL;
+}
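+
+// In C++ terms, the code emitted by the two helpers above asserts the
+// following at run time (a sketch of the invariant only, not code that
+// exists in the tree; cell_for() is a hypothetical stand-in for the
+// JSGlobalPropertyCell obtained via EnsurePropertyCell at compile time):
+//
+//   for (JSObject* o = object; o != holder;
+//        o = JSObject::cast(o->GetPrototype())) {
+//     if (o->IsGlobalObject() &&
+//         !cell_for(o, name)->value()->IsTheHole()) {
+//       goto miss;
+//     }
+//   }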
+
+
+// Convert the int passed in register ival to an IEEE 754 single precision
+// floating point value and store it at memory location (dst + 4 * wordoffset).
+// If the FPU is available, use it for the conversion.
+static void StoreIntAsFloat(MacroAssembler* masm,
+                            Register dst,
+                            Register wordoffset,
+                            Register ival,
+                            Register fval,
+                            Register scratch1,
+                            Register scratch2) {
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(ival, f0);
+    __ cvt_s_w(f0, f0);
+    __ sll(scratch1, wordoffset, 2);
+    __ addu(scratch1, dst, scratch1);
+    __ swc1(f0, MemOperand(scratch1, 0));
+  } else {
+    // FPU is not available, so do the conversion manually.
+
+    Label not_special, done;
+    // Move the sign bit from the source to the destination. This works
+    // because the sign bit of a binary32 value has the same position and
+    // polarity as the two's complement sign bit of a 32-bit integer.
+    ASSERT(kBinary32SignMask == 0x80000000u);
+
+    __ And(fval, ival, Operand(kBinary32SignMask));
+    // Negate value if it is negative.
+    __ subu(scratch1, zero_reg, ival);
+    __ movn(ival, scratch1, fval);
+
+    // We have -1, 0 or 1, which we treat specially. Register ival contains
+    // absolute value: it is either equal to 1 (special case of -1 and 1),
+    // greater than 1 (not a special case) or less than 1 (special case of 0).
+    __ Branch(&not_special, gt, ival, Operand(1));
+
+    // For 1 or -1 we need to or in the 0 exponent (biased).
+    static const uint32_t exponent_word_for_1 =
+        kBinary32ExponentBias << kBinary32ExponentShift;
+
+    __ Xor(scratch1, ival, Operand(1));
+    __ li(scratch2, exponent_word_for_1);
+    __ or_(scratch2, fval, scratch2);
+    __ movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
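+    // Worked example (illustration only): for value +1, fval becomes
+    // 0 | (127 << 23) = 0x3F800000 = +1.0f; for -1 the sign bit is already
+    // in fval, giving 0xBF800000 = -1.0f. The movz fires only when
+    // scratch1 = ival ^ 1 is zero, i.e. when the absolute value is 1.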
+    __ Branch(&done);
+
+    __ bind(&not_special);
+    // Count leading zeros.
+    // Gets the wrong answer for 0, but we already checked for that case above.
+    Register zeros = scratch2;
+    __ clz(zeros, ival);
+
+    // Compute exponent and or it into the exponent register.
+    __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
+    __ subu(scratch1, scratch1, zeros);
+
+    __ sll(scratch1, scratch1, kBinary32ExponentShift);
+    __ or_(fval, fval, scratch1);
+
+    // Shift up the source chopping the top bit off.
+    __ Addu(zeros, zeros, Operand(1));
+    // This wouldn't work for 1 and -1: the shift would be 32, which sllv
+    // treats as 0 (only the low five bits of the amount are used).
+    __ sllv(ival, ival, zeros);
+    // Or in the mantissa (the top kBinary32MantissaBits == 23 bits).
+    __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
+    __ or_(fval, fval, scratch1);
+
+    __ bind(&done);
+
+    __ sll(scratch1, wordoffset, 2);
+    __ addu(scratch1, dst, scratch1);
+    __ sw(fval, MemOperand(scratch1, 0));
+  }
+}
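+
+// A worked example of the manual path above (illustration only): for
+// ival == 5 (0b101) the sign is 0, zeros = clz(5) = 29, so the biased
+// exponent is (31 + 127) - 29 = 129 and fval = 129 << 23 = 0x40800000.
+// Shifting ival left by zeros + 1 = 30 chops off the leading 1, leaving
+// 0x40000000; srl by 32 - 23 = 9 yields the mantissa bits 0x00200000.
+// OR-ing gives 0x40A00000, which is exactly 5.0f.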
+
+
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register hiword,
+                                Register loword,
+                                Register scratch,
+                                int leading_zeroes) {
+  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+  const int mantissa_shift_for_hi_word =
+      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+  const int mantissa_shift_for_lo_word =
+      kBitsPerInt - mantissa_shift_for_hi_word;
+
+  __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
+  if (mantissa_shift_for_hi_word > 0) {
+    __ sll(loword, hiword, mantissa_shift_for_lo_word);
+    __ srl(hiword, hiword, mantissa_shift_for_hi_word);
+    __ or_(hiword, scratch, hiword);
+  } else {
+    __ mov(loword, zero_reg);
+    // Here mantissa_shift_for_hi_word <= 0, so shift left by its negation
+    // to position the mantissa within the high word.
+    __ sll(hiword, hiword, -mantissa_shift_for_hi_word);
+    __ or_(hiword, scratch, hiword);
+  }
+
+  // If the least significant bit of the biased exponent was not 1, it has
+  // been corrupted by the integer's implicit leading 1 (the most
+  // significant bit of the mantissa), so clear it again.
+  if (!(biased_exponent & 1)) {
+    __ li(scratch, 1 << HeapNumber::kExponentShift);
+    __ nor(scratch, scratch, scratch);
+    __ and_(hiword, hiword, scratch);
+  }
+}
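+
+// A worked example (illustration only): with leading_zeroes == 0 and
+// hiword == 0x80000000 (2^31), meaningful_bits = 31 and the biased
+// exponent is 1023 + 31 = 1054 (0x41E, with LSB 0), so scratch holds
+// 0x41E00000. The shifts give loword = 0 and hiword = 0x80000000 >> 11 =
+// 0x00100000, which is the integer's implicit leading 1 landing on the
+// exponent's LSB; the OR produces 0x41F00000. Because the exponent's LSB
+// should be 0, the fixup clears bit 20 again, yielding
+// 0x41E00000:00000000 == 2^31 exactly.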
+
+
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -201,8 +1029,132 @@
                                        String* name,
                                        int save_at_depth,
                                        Label* miss) {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
+  // Make sure there's no overlap between holder and object registers.
+  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+         !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  if (save_at_depth == depth) {
+    __ sw(reg, MemOperand(sp));
+  }
+
+  // Check the maps in the prototype chain.
+  // Traverse the prototype chain from the object and do map checks.
+  JSObject* current = object;
+  while (current != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+    ASSERT(current->GetPrototype()->IsJSObject());
+    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    if (!current->HasFastProperties() &&
+        !current->IsJSGlobalObject() &&
+        !current->IsJSGlobalProxy()) {
+      if (!name->IsSymbol()) {
+        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
+        Object* lookup_result = NULL;  // Initialization to please compiler.
+        if (!maybe_lookup_result->ToObject(&lookup_result)) {
+          set_failure(Failure::cast(maybe_lookup_result));
+          return reg;
+        }
+        name = String::cast(lookup_result);
+      }
+      ASSERT(current->property_dictionary()->FindEntry(name) ==
+             StringDictionary::kNotFound);
+
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
+
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now the object is in holder_reg.
+      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else if (heap()->InNewSpace(prototype)) {
+      // Get the map of the current object.
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+      // Branch on the result of the map check.
+      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        // Restore scratch register to be the map of the object.  In the
+        // new space case below, we load the prototype from the map in
+        // the scratch register.
+        __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+
+      reg = holder_reg;  // From now the object is in holder_reg.
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      // Check the map of the current object.
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      // Branch on the result of the map check.
+      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // From now the object is in holder_reg.
+      __ li(reg, Operand(Handle<JSObject>(prototype)));
+    }
+
+    if (save_at_depth == depth) {
+      __ sw(reg, MemOperand(sp));
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+  }
+
+  // Check the holder map.
+  __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+  // Log the check depth.
+  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+  // Perform security check for access to the global object.
+  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+  if (holder->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+  }
+
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
+
+  MaybeObject* result = GenerateCheckPropertyCells(masm(),
+                                                   object,
+                                                   holder,
+                                                   name,
+                                                   scratch1,
+                                                   miss);
+  if (result->IsFailure()) set_failure(Failure::cast(result));
+
+  // Return the register containing the holder.
+  return reg;
 }
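+
+// In outline, the chain walk above compiles the following invariant: for
+// each object from 'object' up to and including 'holder', either its map
+// still equals the compile-time map, or, for slow-mode (dictionary)
+// objects, 'name' is provably absent from its property dictionary. A
+// sketch only, not code in the tree:
+//
+//   for (JSObject* o = object; true; o = JSObject::cast(o->GetPrototype())) {
+//     require: map(o) unchanged, or negative dictionary lookup for 'name';
+//     if (o == holder) break;
+//   }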
 
 
@@ -215,7 +1167,16 @@
                                      int index,
                                      String* name,
                                      Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  // Check that the receiver isn't a smi.
+  __ And(scratch1, receiver, Operand(kSmiTagMask));
+  __ Branch(miss, eq, scratch1, Operand(zero_reg));
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                      name, miss);
+  GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
+  __ Ret();
 }
 
 
@@ -228,7 +1189,17 @@
                                         Object* value,
                                         String* name,
                                         Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss, scratch1);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, scratch3, name, miss);
+
+  // Return the constant value.
+  __ li(v0, Operand(Handle<Object>(value)));
+  __ Ret();
 }
 
 
@@ -242,8 +1213,56 @@
                                                 AccessorInfo* callback,
                                                 String* name,
                                                 Label* miss) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss, scratch1);
+
+  // Check that the maps haven't changed.
+  Register reg =
+    CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                    name, miss);
+
+  // Build the AccessorInfo::args_ list on the stack and push the property
+  // name below the exit frame, storing pointers to them so the GC is aware
+  // of them.
+  __ push(receiver);
+  __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
+  Handle<AccessorInfo> callback_handle(callback);
+  if (heap()->InNewSpace(callback_handle->data())) {
+    __ li(scratch3, callback_handle);
+    __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+  } else {
+    __ li(scratch3, Handle<Object>(callback_handle->data()));
+  }
+  __ Push(reg, scratch3, name_reg);
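+  // The stack now holds, from sp upward: name, data, holder, receiver,
+  // with scratch2 (captured before these pushes) pointing at the receiver
+  // slot. This is the block that AccessorInfo::args_ indexes into on the
+  // C++ side (an illustration of the layout; the exact indices are defined
+  // by v8::AccessorInfo and not spelled out here).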
+  __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
+  __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>
+
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+
+  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+  // struct from the function (which is currently the case). This means we pass
+  // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
+  // will handle setting up a0.
+
+  const int kApiStackSpace = 1;
+
+  __ EnterExitFrame(false, kApiStackSpace);
+  // Create AccessorInfo instance on the stack above the exit frame with
+  // scratch2 (internal::Object **args_) as the data.
+  __ sw(a2, MemOperand(sp, kPointerSize));
+  // a2 (second argument - see note above) = AccessorInfo&
+  __ Addu(a2, sp, kPointerSize);
+
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated).  Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  ExternalReference ref =
+      ExternalReference(&fun,
+                        ExternalReference::DIRECT_GETTER_CALL,
+                        masm()->isolate());
+  // 4 args - will be freed later by LeaveExitFrame.
+  return masm()->TryCallApiFunctionAndReturn(ref, 4);
 }
 
 
@@ -257,12 +1276,143 @@
                                            Register scratch3,
                                            String* name,
                                            Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(interceptor_holder->HasNamedInterceptor());
+  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // So far the most popular follow-ups for interceptor loads are FIELD and
+  // CALLBACKS, so inline only those; other cases may be added later.
+  bool compile_followup_inline = false;
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
+    if (lookup->type() == FIELD) {
+      compile_followup_inline = true;
+    } else if (lookup->type() == CALLBACKS &&
+        lookup->GetCallbackObject()->IsAccessorInfo() &&
+        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+      compile_followup_inline = true;
+    }
+  }
+
+  if (compile_followup_inline) {
+    // Compile the interceptor call, followed by inline code to load the
+    // property from further up the prototype chain if the call fails.
+    // Check that the maps haven't changed.
+    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+                                          scratch1, scratch2, scratch3,
+                                          name, miss);
+    ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+    // Save necessary data before invoking an interceptor.
+    // Requires a frame to make GC aware of pushed pointers.
+    __ EnterInternalFrame();
+
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      // CALLBACKS case needs a receiver to be passed into C++ callback.
+      __ Push(receiver, holder_reg, name_reg);
+    } else {
+      __ Push(holder_reg, name_reg);
+    }
+
+    // Invoke an interceptor.  Note: the map checks from the receiver to the
+    // interceptor's holder have been compiled before (see a caller of this
+    // method).
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           interceptor_holder);
+
+    // Check if the interceptor provided a value for the property.  If so,
+    // return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+    __ LeaveInternalFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
+
+    // Check that the maps from the interceptor's holder to the lookup's
+    // holder haven't changed, and load the lookup's holder into the
+    // holder_reg register.
+    if (interceptor_holder != lookup->holder()) {
+      holder_reg = CheckPrototypes(interceptor_holder,
+                                   holder_reg,
+                                   lookup->holder(),
+                                   scratch1,
+                                   scratch2,
+                                   scratch3,
+                                   name,
+                                   miss);
+    }
+
+    if (lookup->type() == FIELD) {
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder. Retrieve the field from the field's holder.
+      GenerateFastPropertyLoad(masm(), v0, holder_reg,
+                               lookup->holder(), lookup->GetFieldIndex());
+      __ Ret();
+    } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
+      ASSERT(lookup->type() == CALLBACKS);
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      ASSERT(callback != NULL);
+      ASSERT(callback->getter() != NULL);
+
+      // Tail call to runtime.
+      // Important invariant in CALLBACKS case: the code above must be
+      // structured to never clobber |receiver| register.
+      __ li(scratch2, Handle<AccessorInfo>(callback));
+      // holder_reg is either receiver or scratch1.
+      if (!receiver.is(holder_reg)) {
+        ASSERT(scratch1.is(holder_reg));
+        __ Push(receiver, holder_reg);
+        __ lw(scratch3,
+              FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+        __ Push(scratch3, scratch2, name_reg);
+      } else {
+        __ push(receiver);
+        __ lw(scratch3,
+              FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+        __ Push(holder_reg, scratch3, scratch2, name_reg);
+      }
+
+      ExternalReference ref =
+          ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
+                            masm()->isolate());
+      __ TailCallExternalReference(ref, 5, 1);
+    }
+  } else {  // !compile_followup_inline
+    // Call the runtime system to load the interceptor.
+    // Check that the maps haven't changed.
+    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+                                          scratch1, scratch2, scratch3,
+                                          name, miss);
+    PushInterceptorArguments(masm(), receiver, holder_reg,
+                             name_reg, interceptor_holder);
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
+    __ TailCallExternalReference(ref, 5, 1);
+  }
 }
 
 
 void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  if (kind_ == Code::KEYED_CALL_IC) {
+    __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
+  }
 }
 
 
@@ -270,20 +1420,63 @@
                                                    JSObject* holder,
                                                    String* name,
                                                    Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(holder->IsGlobalObject());
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ lw(a0, MemOperand(sp, argc * kPointerSize));
+
+  // If the object is the holder, then we know it's a global object, which
+  // can only happen for contextual calls. In that case, the receiver cannot
+  // be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(a0, miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
 }
 
 
 void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  // Get the value from the cell.
+  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+  // Check that the cell contains the same function.
+  if (heap()->InNewSpace(function)) {
+    // We can't embed a pointer to a function in new space so we have
+    // to verify that the shared function info is unchanged. This has
+    // the nice side effect that multiple closures based on the same
+    // function can all use this call IC. Before we load through the
+    // function, we have to verify that it still is a function.
+    __ JumpIfSmi(a1, miss);
+    __ GetObjectType(a1, a3, a3);
+    __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+    // Check the shared function info. Make sure it hasn't changed.
+    __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
+    __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ Branch(miss, ne, t0, Operand(a3));
+  } else {
+    __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+  }
 }
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  MaybeObject* maybe_obj =
+      isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+                                               kind_,
+                                               extra_ic_state_);
+  Object* obj;
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
+  return obj;
 }
 
 
@@ -291,8 +1484,34 @@
                                                 JSObject* holder,
                                                 int index,
                                                 String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  const int argc = arguments().immediate();
+
+  // Get the receiver of the function from the stack into a0.
+  __ lw(a0, MemOperand(sp, argc * kPointerSize));
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(a0, &miss, t0);
+
+  // Do the right check and compute the holder register.
+  Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
+  GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
+
+  GenerateCallFunction(masm(), object, arguments(), &miss);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
 }
 
 
@@ -301,8 +1520,160 @@
                                                     JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  Register receiver = a1;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(JSObject::cast(object), receiver,
+                  holder, a3, v0, t0, name, &miss);
+
+  if (argc == 0) {
+    // Nothing to do, just return the length.
+    __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+    __ Drop(argc + 1);
+    __ Ret();
+  } else {
+    Label call_builtin;
+
+    Register elements = a3;
+    Register end_elements = t1;
+
+    // Get the elements array of the object.
+    __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+    // Check that the elements are in fast mode and writable.
+    __ CheckMap(elements,
+                v0,
+                Heap::kFixedArrayMapRootIndex,
+                &call_builtin,
+                DONT_DO_SMI_CHECK);
+
+    if (argc == 1) {  // Otherwise fall through to call the builtin.
+      Label exit, with_write_barrier, attempt_to_grow_elements;
+
+      // Get the array's length into v0 and calculate new length.
+      __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      STATIC_ASSERT(kSmiTagSize == 1);
+      STATIC_ASSERT(kSmiTag == 0);
+      __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
+
+      // Get the elements array's length.
+      __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+      // Check if we could survive without allocation.
+      __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
+
+      // Save new length.
+      __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+      // Push the element.
+      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+      // We may need a register containing the address end_elements below,
+      // so write back the value in end_elements.
+      __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+      __ Addu(end_elements, elements, end_elements);
+      const int kEndElementsOffset =
+          FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
+      __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
+      __ Addu(end_elements, end_elements, kPointerSize);
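+      // Address arithmetic, for illustration (assuming argc == 1 and an
+      // old length of 2): v0 holds Smi(3) == 6, the sll/Addu produce
+      // elements + 12, and kEndElementsOffset == 8 - 1 - 4 == 3, so the sw
+      // stores to elements + 15: exactly the untagged address of element 2
+      // (8-byte FixedArray header, 1-byte heap-object tag).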
+
+      // Check for a smi.
+      __ JumpIfNotSmi(t0, &with_write_barrier);
+      __ bind(&exit);
+      __ Drop(argc + 1);
+      __ Ret();
+
+      __ bind(&with_write_barrier);
+      __ InNewSpace(elements, t0, eq, &exit);
+      __ RecordWriteHelper(elements, end_elements, t0);
+      __ Drop(argc + 1);
+      __ Ret();
+
+      __ bind(&attempt_to_grow_elements);
+      // v0: array's length + 1.
+      // t0: elements' length.
+
+      if (!FLAG_inline_new) {
+        __ Branch(&call_builtin);
+      }
+
+      ExternalReference new_space_allocation_top =
+          ExternalReference::new_space_allocation_top_address(
+              masm()->isolate());
+      ExternalReference new_space_allocation_limit =
+          ExternalReference::new_space_allocation_limit_address(
+              masm()->isolate());
+
+      const int kAllocationDelta = 4;
+      // Load top and check if it is the end of elements.
+      __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+      __ Addu(end_elements, elements, end_elements);
+      __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
+      __ li(t3, Operand(new_space_allocation_top));
+      __ lw(t2, MemOperand(t3));
+      __ Branch(&call_builtin, ne, end_elements, Operand(t2));
+
+      __ li(t5, Operand(new_space_allocation_limit));
+      __ lw(t5, MemOperand(t5));
+      __ Addu(t2, t2, Operand(kAllocationDelta * kPointerSize));
+      __ Branch(&call_builtin, hi, t2, Operand(t5));
+
+      // We fit and could grow elements.
+      // Update new_space_allocation_top.
+      __ sw(t2, MemOperand(t3));
+      // Push the argument.
+      __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ sw(t2, MemOperand(end_elements));
+      // Fill the rest with holes.
+      __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+      for (int i = 1; i < kAllocationDelta; i++) {
+        __ sw(t2, MemOperand(end_elements, i * kPointerSize));
+      }
+
+      // Update elements' and array's sizes.
+      __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
+      __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+      // Elements are in new space, so write barrier is not required.
+      __ Drop(argc + 1);
+      __ Ret();
+    }
+    __ bind(&call_builtin);
+    __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
+                                                   masm()->isolate()),
+                                 argc + 1,
+                                 1);
+  }
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
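+
+// The grow-elements fast path above relies on bump-pointer allocation: it
+// succeeds only when the elements backing store is the most recent
+// new-space allocation, so the store can be extended in place by moving
+// the allocation top. Roughly (a C++ sketch, not code in the tree):
+//
+//   if (end_of(elements) == new_space_top &&
+//       new_space_top + kAllocationDelta * kPointerSize <= new_space_limit) {
+//     new_space_top += kAllocationDelta * kPointerSize;
+//     elements[old_length] = argument;  // Store the pushed value.
+//     fill the remaining kAllocationDelta - 1 slots with the hole;
+//     bump the elements' and the array's lengths;
+//   } else {
+//     tail call Builtins::c_ArrayPush;
+//   }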
 
 
@@ -311,8 +1682,87 @@
                                                    JSGlobalPropertyCell* cell,
                                                    JSFunction* function,
                                                    String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+  Label miss, return_undefined, call_builtin;
+
+  Register receiver = a1;
+  Register elements = a3;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(JSObject::cast(object),
+                  receiver, holder, elements, t0, v0, name, &miss);
+
+  // Get the elements array of the object.
+  __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+  // Check that the elements are in fast mode and writable.
+  __ CheckMap(elements,
+              v0,
+              Heap::kFixedArrayMapRootIndex,
+              &call_builtin,
+              DONT_DO_SMI_CHECK);
+
+  // Get the array's length into t0 and calculate new length.
+  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Subu(t0, t0, Operand(Smi::FromInt(1)));
+  __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
+
+  // Get the last element.
+  __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
+  // We can't address the last element in one operation. Compute the more
+  // expensive shift first, and use an offset later on.
+  __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(elements, elements, t1);
+  __ lw(v0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Branch(&call_builtin, eq, v0, Operand(t2));
+
+  // Set the array's length.
+  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+  // Fill with the hole.
+  __ sw(t2, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&return_undefined);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&call_builtin);
+  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
+                                                 masm()->isolate()),
+                               argc + 1,
+                               1);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -322,8 +1772,84 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            v0,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+                  a1, a3, t0, name, &miss);
+
+  Register receiver = a1;
+  Register index = t1;
+  Register scratch = a3;
+  Register result = v0;
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+  if (argc > 0) {
+    __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharCodeAtGenerator char_code_at_generator(receiver,
+                                                   index,
+                                                   scratch,
+                                                   result,
+                                                   &miss,  // When not a string.
+                                                   &miss,  // When not a number.
+                                                   index_out_of_range_label,
+                                                   STRING_INDEX_IS_NUMBER);
+  char_code_at_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(v0, Heap::kNanValueRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  // Restore function name in a2.
+  __ li(a2, Handle<String>(name));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -333,8 +1859,85 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            v0,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+                  a1, a3, t0, name, &miss);
+
+  Register receiver = v0;
+  Register index = t1;
+  Register scratch1 = a1;
+  Register scratch2 = a3;
+  Register result = v0;
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+  if (argc > 0) {
+    __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          index_out_of_range_label,
+                                          STRING_INDEX_IS_NUMBER);
+  char_at_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  // Restore function name in a2.
+  __ li(a2, Handle<String>(name));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -344,8 +1947,69 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+  Label miss;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(a1, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the char code argument.
+  Register code = a1;
+  __ lw(code, MemOperand(sp, 0 * kPointerSize));
+
+  // Check the code is a smi.
+  Label slow;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfNotSmi(code, &slow);
+
+  // Convert the smi code to uint16.
+  __ And(code, code, Operand(Smi::FromInt(0xffff)));
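+  // Masking with Smi::FromInt(0xffff) (bit pattern 0xffff << kSmiTagSize)
+  // truncates the untagged value to 16 bits while leaving the smi tag
+  // intact, so no untag/retag is needed.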
+
+  StringCharFromCodeGenerator char_from_code_generator(code, v0);
+  char_from_code_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_from_code_generator.GenerateSlow(masm(), call_helper);
+
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ bind(&slow);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  __ bind(&miss);
+  // a2: function name.
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
@@ -354,8 +2018,134 @@
                                                     JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  if (!CpuFeatures::IsSupported(FPU)) return heap()->undefined_value();
+  CpuFeatures::Scope scope_fpu(FPU);
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+  Label miss, slow;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(a1, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the (only) argument into v0.
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+  // If the argument is a smi, just return.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ And(t0, v0, Operand(kSmiTagMask));
+  __ Drop(argc + 1, eq, t0, Operand(zero_reg));
+  __ Ret(eq, t0, Operand(zero_reg));
+
+  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+
+  Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
+
+  // The FPU is available here, so we use the floor instruction.
+
+  // Load the HeapNumber value.
+  __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+
+  // Back up FCSR.
+  __ cfc1(a3, FCSR);
+  // Clearing FCSR clears the exception mask with no side-effects.
+  __ ctc1(zero_reg, FCSR);
+  // Convert the argument to an integer.
+  __ floor_w_d(f0, f0);
+
+  // Start checking for special cases.
+  // Get the argument exponent and clear the sign bit.
+  __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
+  __ And(t2, t1, Operand(~HeapNumber::kSignMask));
+  __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
+
+  // Retrieve FCSR and check for FPU errors.
+  __ cfc1(t5, FCSR);
+  __ srl(t5, t5, kFCSRFlagShift);
+  // Flag 1 (inexact) marks an imprecise but still valid result, so ignore it.
+  __ And(t5, t5, Operand(kFCSRFlagMask ^ 1));
+  __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
+
+  // Check for NaN, Infinity, and -Infinity.
+  // They are invariant through a Math.Floor call, so just
+  // return the original argument.
+  __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
+                          >> HeapNumber::kMantissaBitsInTopWord));
+  __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
+  // We had an overflow or underflow in the conversion. Check if we
+  // have a big exponent.
+  // If greater or equal, the argument is already rounded and in v0.
+  __ Branch(&restore_fcsr_and_return, ge, t3,
+      Operand(HeapNumber::kMantissaBits));
+  __ Branch(&wont_fit_smi);
+
+  __ bind(&no_fpu_error);
+  // Move the result back to v0.
+  __ mfc1(v0, f0);
+  // Check if the result fits into a smi.
+  __ Addu(a1, v0, Operand(0x40000000));
+  __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
+  // Tag the result.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ sll(v0, v0, kSmiTagSize);
+
+  // Check for -0.
+  __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
+  // t1 already holds the HeapNumber exponent.
+  __ And(t0, t1, Operand(HeapNumber::kSignMask));
+  // If our HeapNumber is negative, the result was -0: reload the original
+  // argument (the -0 HeapNumber) into v0 and return it. Otherwise v0
+  // already holds smi 0, so we can just return.
+  __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+  __ bind(&restore_fcsr_and_return);
+  // Restore FCSR and return.
+  __ ctc1(a3, FCSR);
+
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&wont_fit_smi);
+  // Restore FCSR and fall to slow case.
+  __ ctc1(a3, FCSR);
+
+  __ bind(&slow);
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  __ bind(&miss);
+  // a2: function name.
+  MaybeObject* obj = GenerateMissBranch();
+  if (obj->IsFailure()) return obj;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
@@ -364,8 +2154,100 @@
                                                   JSGlobalPropertyCell* cell,
                                                   JSFunction* function,
                                                   String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+  Label miss;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(a1, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the (only) argument into v0.
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+  // Check if the argument is a smi.
+  Label not_smi;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfNotSmi(v0, &not_smi);
+
+  // Bitwise-NOT the value, or leave it unchanged, depending on the sign of
+  // the argument.
+  __ sra(t0, v0, kBitsPerInt - 1);
+  __ Xor(a1, v0, t0);
+
+  // Add 1 or do nothing depending on the sign of the argument.
+  __ Subu(v0, a1, t0);
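+  // Worked example (illustration only): for the untagged value -5, t0 is
+  // -5 >> 31 = -1 (all ones), a1 = -5 ^ -1 = ~(-5) = 4, and v0 = 4 - (-1)
+  // = 5. For a non-negative value t0 is 0 and both steps are identities.
+  // The same identity holds for the smi-tagged value, since smi tagging is
+  // a left shift by one.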
+
+  // If the result is still negative, go to the slow case.
+  // This only happens for the most negative smi.
+  Label slow;
+  __ Branch(&slow, lt, v0, Operand(zero_reg));
+
+  // Smi case done.
+  __ Drop(argc + 1);
+  __ Ret();
+
+  // Check if the argument is a heap number and load its exponent and
+  // sign.
+  __ bind(&not_smi);
+  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+  __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+
+  // Check the sign of the argument. If the argument is positive,
+  // just return it.
+  Label negative_sign;
+  __ And(t0, a1, Operand(HeapNumber::kSignMask));
+  __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
+  __ Drop(argc + 1);
+  __ Ret();
+
+  // If the argument is negative, clear the sign, and return a new
+  // number.
+  __ bind(&negative_sign);
+  __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
+  __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+  __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
+  __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+  __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+  __ Drop(argc + 1);
+  __ Ret();
+
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ bind(&slow);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  __ bind(&miss);
+  // a2: function name.
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
@@ -376,8 +2258,51 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  Counters* counters = isolate()->counters();
+
+  ASSERT(optimization.is_simple_api_call());
+  // Bail out if the object is a global object, as we don't want to
+  // repatch it to the global receiver.
+  if (object->IsGlobalObject()) return heap()->undefined_value();
+  if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
+  int depth = optimization.GetPrototypeDepthOfExpectedType(
+            JSObject::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+  Label miss, miss_before_stack_reserved;
+
+  GenerateNameCheck(name, &miss_before_stack_reserved);
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(a1, &miss_before_stack_reserved);
+
+  __ IncrementCounter(counters->call_const(), 1, a0, a3);
+  __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
+
+  ReserveSpaceForFastApiCall(masm(), a0);
+
+  // Check that the maps haven't changed and find the holder as a side effect.
+  CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                  depth, &miss);
+
+  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+  if (result->IsFailure()) return result;
+
+  __ bind(&miss);
+  FreeSpaceForFastApiCall(masm());
+
+  __ bind(&miss_before_stack_reserved);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -386,26 +2311,251 @@
                                                    JSFunction* function,
                                                    String* name,
                                                    CheckType check) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  if (HasCustomCallGenerator(function)) {
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, NULL, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // Undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
+  }
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  if (check != NUMBER_CHECK) {
+    __ And(t1, a1, Operand(kSmiTagMask));
+    __ Branch(&miss, eq, t1, Operand(zero_reg));
+  }
+
+  // Make sure that it's okay not to patch the on-stack receiver
+  // unless we're doing a receiver map check.
+  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  SharedFunctionInfo* function_info = function->shared();
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      __ IncrementCounter(masm()->isolate()->counters()->call_const(),
+          1, a0, a3);
+
+      // Check that the maps haven't changed.
+      CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                      &miss);
+
+      // Patch the receiver on the stack with the global proxy if
+      // necessary.
+      if (object->IsGlobalObject()) {
+        __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+        __ sw(a3, MemOperand(sp, argc * kPointerSize));
+      }
+      break;
+
+    case STRING_CHECK:
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
+        // Check that the object is a string or a symbol.
+        __ GetObjectType(a1, a3, a3);
+        __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+        // Check that the maps starting from the prototype haven't changed.
+        GenerateDirectLoadGlobalFunctionPrototype(
+            masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+
+    case NUMBER_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
+        Label fast;
+        // Check that the object is a smi or a heap number.
+        __ And(t1, a1, Operand(kSmiTagMask));
+        __ Branch(&fast, eq, t1, Operand(zero_reg));
+        __ GetObjectType(a1, a0, a0);
+        __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+        __ bind(&fast);
+        // Check that the maps starting from the prototype haven't changed.
+        GenerateDirectLoadGlobalFunctionPrototype(
+            masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
+        Label fast;
+        // Check that the object is a boolean.
+        __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+        __ Branch(&fast, eq, a1, Operand(t0));
+        __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+        __ Branch(&miss, ne, a1, Operand(t0));
+        __ bind(&fast);
+        // Check that the maps starting from the prototype haven't changed.
+        GenerateDirectLoadGlobalFunctionPrototype(
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
 MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                       JSObject* holder,
                                                       String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  // Get the receiver from the stack.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  CallInterceptorCompiler compiler(this, arguments(), a2);
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         a1,
+                                         a3,
+                                         t0,
+                                         a0,
+                                         &miss);
+  if (result->IsFailure()) {
+    return result;
+  }
+
+  // Move returned value, the function to call, to a1.
+  __ mov(a1, v0);
+  // Restore receiver.
+  __ lw(a0, MemOperand(sp, argc * kPointerSize));
+
+  GenerateCallFunction(masm(), object, arguments(), &miss);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* CallStubCompiler::CompileCallGlobal(
+    JSObject* object,
+    GlobalObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name,
+    Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  if (HasCustomCallGenerator(function)) {
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, cell, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // Undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
+  }
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  GenerateGlobalReceiverCheck(object, holder, name, &miss);
+  GenerateLoadFunctionFromCell(cell, function, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a3, MemOperand(sp, argc * kPointerSize));
+  }
+
+  // Set up the context (function already in a1).
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
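+  // A contextual call site (one that resolves the callee from the scope
+  // rather than from a receiver property) is invoked as a function call;
+  // everything else is invoked as a method call.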
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  if (V8::UseCrankshaft()) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+                  JUMP_FUNCTION, call_kind);
+  }
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
@@ -413,39 +2563,205 @@
                                                   int index,
                                                   Map* transition,
                                                   String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Name register might be clobbered.
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     a1, a2, a3,
+                     &miss);
+  __ bind(&miss);
+  __ li(a2, Operand(Handle<String>(name)));  // Restore name.
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
 MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
                                                      AccessorInfo* callback,
                                                      String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(a1, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(a1, a3, &miss);
+  }
+
+  // Stub is never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  __ push(a1);  // Receiver.
+  __ li(a3, Operand(Handle<AccessorInfo>(callback)));  // Callback info.
+  __ Push(a3, a2, a0);
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
+          masm()->isolate());
+  __ TailCallExternalReference(store_callback_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
 }
 
 
 MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
                                                         String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(a1, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(receiver->map())));
+
+  // Perform global security token check if needed.
+  if (receiver->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(a1, a3, &miss);
+  }
+
+  // Stub is never generated for non-global objects that require access
+  // checks.
+  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+  __ Push(a1, a2, a0);  // Receiver, name, value.
+
+  __ li(a0, Operand(Smi::FromInt(strict_mode_)));
+  __ push(a0);  // Strict mode.
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
+          masm()->isolate());
+  __ TailCallExternalReference(store_ic_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
 MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
                                                    JSGlobalPropertyCell* cell,
                                                    String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map of the global has not changed.
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+  // Check that the value in the cell is not the hole. If it is, this
+  // cell could have been deleted and reintroducing the global needs
+  // to update the property details in the property dictionary of the
+  // global object. We bail out to the runtime system to do that.
+  __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+  __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+  __ Branch(&miss, eq, t1, Operand(t2));
+
+  // Store the value in the cell.
+  __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+  __ mov(v0, a0);  // Stored value must be returned in v0.
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
+  __ Ret();
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
 MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
                                                       JSObject* object,
                                                       JSObject* last) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver is not a smi.
+  __ JumpIfSmi(a0, &miss);
+
+  // Check the maps of the full prototype chain.
+  CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
+
+  // If the last object in the prototype chain is a global object,
+  // check that the global property cell is empty.
+  if (last->IsGlobalObject()) {
+    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+                                                  GlobalObject::cast(last),
+                                                  name,
+                                                  a1,
+                                                  &miss);
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
+  }
+
+  // Return undefined if the maps of the full prototype chain are still the
+  // same.
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NONEXISTENT, heap()->empty_string());
 }
 
 
@@ -453,8 +2769,21 @@
                                                 JSObject* holder,
                                                 int index,
                                                 String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  __ mov(v0, a0);
+
+  GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
 }
 
 
@@ -462,8 +2791,25 @@
                                                    JSObject* object,
                                                    JSObject* holder,
                                                    AccessorInfo* callback) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
+                                             callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -471,16 +2817,50 @@
                                                    JSObject* holder,
                                                    Object* value,
                                                    String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
 MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
                                                       JSObject* holder,
                                                       String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(object,
+                          holder,
+                          &lookup,
+                          a0,
+                          a2,
+                          a3,
+                          a1,
+                          t0,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -489,8 +2869,45 @@
                                                  JSGlobalPropertyCell* cell,
                                                  String* name,
                                                  bool is_dont_delete) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // If the object is the holder, then we know it's a global object,
+  // which can only happen for contextual calls. In this case, the
+  // receiver cannot be a smi.
+  if (object != holder) {
+    __ And(t0, a0, Operand(kSmiTagMask));
+    __ Branch(&miss, eq, t0, Operand(zero_reg));
+  }
+
+  // Check that the map of the global has not changed.
+  CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
+
+  // Get the value from the cell.
+  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (!is_dont_delete) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&miss, eq, t0, Operand(at));
+  }
+
+  __ mov(v0, t0);
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
@@ -498,8 +2915,21 @@
                                                      JSObject* receiver,
                                                      JSObject* holder,
                                                      int index) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(FIELD, name);
 }
 
 
@@ -508,8 +2938,27 @@
     JSObject* receiver,
     JSObject* holder,
     AccessorInfo* callback) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
+                                             t0, callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -517,40 +2966,171 @@
                                                         JSObject* receiver,
                                                         JSObject* holder,
                                                         Object* value) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
                                                            JSObject* holder,
                                                            String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          a1,
+                          a0,
+                          a2,
+                          a3,
+                          t0,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(INTERCEPTOR, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadArrayLength(masm(), a1, a2, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
+  __ bind(&miss);
+  __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+
+  // Check the name hasn't changed.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a1,
+                 a2,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
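+  // DispatchMap tail-jumps to the stub when the receiver's map matches;
+  // otherwise execution falls through to the generic miss handler below.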
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+  __ JumpIfSmi(a1, &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
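+  // Linear dispatch: tail-jump to the handler whose map matches the
+  // receiver's map, or fall through to the miss handler if none does.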
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
+  }
+
+  __ bind(&miss);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -558,39 +3138,1143 @@
                                                        int index,
                                                        Map* transition,
                                                        String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  // -----------------------------------
+
+  Label miss;
+
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
+
+  // Check that the name has not changed.
+  __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
+
+  // a3 is used as a scratch register. a1 and a2 keep their values if a jump
+  // to the miss label is generated.
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     a2, a1, a3,
+                     &miss);
+  __ bind(&miss);
+
+  __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
+  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
-    JSObject* receiver) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
+    Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  // -----------------------------------
+  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  MaybeObject* maybe_stub =
+      KeyedStoreFastElementStub(is_js_array).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a2,
+                 a3,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  // -----------------------------------
+  Label miss;
+  __ JumpIfSmi(a2, &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+  }
+
+  __ bind(&miss);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // a0    : argc
+  // a1    : constructor
+  // ra    : return address
+  // [sp]  : last argument
+  Label generic_stub_call;
+
+  // Use t7 for holding undefined which is used in several places below.
+  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check to see whether there are any break points in the function code. If
+  // there are, jump to the generic constructor stub, which calls the actual
+  // code for the function, thereby hitting the break points.
+  __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
+  __ Branch(&generic_stub_call, ne, a2, Operand(t7));
+#endif
+
+  // Load the initial map and verify that it is in fact a map.
+  // a1: constructor function
+  // t7: undefined
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+  __ And(t0, a2, Operand(kSmiTagMask));
+  __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+  __ GetObjectType(a2, a3, t0);
+  __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
+
+#ifdef DEBUG
+  // Cannot construct functions this way.
+  // a0: argc
+  // a1: constructor function
+  // a2: initial map
+  // t7: undefined
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  __ Check(ne, "Function constructed by construct stub.",
+      a3, Operand(JS_FUNCTION_TYPE));
+#endif
+
+  // Now allocate the JSObject in new space.
+  // a0: argc
+  // a1: constructor function
+  // a2: initial map
+  // t7: undefined
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ AllocateInNewSpace(a3,
+                        t4,
+                        t5,
+                        t6,
+                        &generic_stub_call,
+                        SIZE_IN_WORDS);
+
+  // Allocated the JSObject, now initialize the fields. Map is set to initial
+  // map and properties and elements are set to empty fixed array.
+  // a0: argc
+  // a1: constructor function
+  // a2: initial map
+  // a3: object size (in words)
+  // t4: JSObject (not tagged)
+  // t7: undefined
+  __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+  __ mov(t5, t4);
+  __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+  __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+  __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+  __ Addu(t5, t5, Operand(3 * kPointerSize));
+  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+  ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+  ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
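+  // The assertions guarantee that the three stores above filled the first
+  // three words of the object, so t5 now points at the first in-object
+  // property.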
+
+  // Calculate the location of the first argument. The stack contains only the
+  // argc arguments.
+  __ sll(a1, a0, kPointerSizeLog2);
+  __ Addu(a1, a1, sp);
+
+  // Fill all the in-object properties with undefined.
+  // a0: argc
+  // a1: first argument
+  // a3: object size (in words)
+  // t4: JSObject (not tagged)
+  // t5: First in-object property of JSObject (not tagged)
+  // t7: undefined
+  // Fill the initialized properties with a constant value or a passed argument
+  // depending on the this.x = ...; assignment in the function.
+  SharedFunctionInfo* shared = function->shared();
+  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+    if (shared->IsThisPropertyAssignmentArgument(i)) {
+      Label not_passed, next;
+      // Check if the argument assigned to the property is actually passed.
+      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+      __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
+      // Argument passed - find it on the stack.
+      __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
+      __ sw(a2, MemOperand(t5));
+      __ Addu(t5, t5, kPointerSize);
+      __ jmp(&next);
+      __ bind(&not_passed);
+      // Set the property to undefined.
+      __ sw(t7, MemOperand(t5));
+      __ Addu(t5, t5, Operand(kPointerSize));
+      __ bind(&next);
+    } else {
+      // Set the property to the constant value.
+      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+      __ li(a2, Operand(constant));
+      __ sw(a2, MemOperand(t5));
+      __ Addu(t5, t5, kPointerSize);
+    }
+  }
+
+  // Fill the unused in-object property fields with undefined.
+  ASSERT(function->has_initial_map());
+  for (int i = shared->this_property_assignments_count();
+       i < function->initial_map()->inobject_properties();
+       i++) {
+    __ sw(t7, MemOperand(t5));
+    __ Addu(t5, t5, kPointerSize);
+  }
+
+  // a0: argc
+  // t4: JSObject (not tagged)
+  // Move argc to a1 and the JSObject to return to v0 and tag it.
+  __ mov(a1, a0);
+  __ mov(v0, t4);
+  __ Or(v0, v0, Operand(kHeapObjectTag));
+
+  // v0: JSObject
+  // a1: argc
+  // Remove caller arguments and receiver from the stack and return.
+  __ sll(t0, a1, kPointerSizeLog2);
+  __ Addu(sp, sp, t0);
+  __ Addu(sp, sp, Operand(kPointerSize));
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
+  __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
+  __ Ret();
+
+  // Jump to the generic stub in case the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_stub_call);
+  Handle<Code> generic_construct_stub =
+      masm()->isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
+    JSObject* receiver, ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  MaybeObject* maybe_stub =
+      KeyedLoadExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a1,
+                 a2,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
+    JSObject* receiver, ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : name
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  // -----------------------------------
+  MaybeObject* maybe_stub =
+      KeyedStoreExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a2,
+                 a3,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  return GetCode();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+static bool IsElementTypeSigned(ExternalArrayType array_type) {
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalShortArray:
+    case kExternalIntArray:
+      return true;
+
+    case kExternalUnsignedByteArray:
+    case kExternalUnsignedShortArray:
+    case kExternalUnsignedIntArray:
+      return false;
+
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow, failed_allocation;
+
+  Register key = a0;
+  Register receiver = a1;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &miss_force_generic);
+
+  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // a3: elements array
+
+  // Check that the index is in range.
+  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+  __ sra(t2, key, kSmiTagSize);
+  // Unsigned comparison catches both negative and too-large values.
+  __ Branch(&miss_force_generic, Uless, t1, Operand(t2));
+
+  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+  // a3: base pointer of external storage
+
+  // We are not untagging the smi key; instead we work with it as if it
+  // were premultiplied by 2.
+  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
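+  // Consequently the element offset is key >> 1 for 1-byte elements, key
+  // itself for 2-byte elements, and key << 1 for 4-byte elements.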
+
+  Register value = a2;
+  switch (array_type) {
+    case kExternalByteArray:
+      __ srl(t2, key, 1);
+      __ addu(t3, a3, t2);
+      __ lb(value, MemOperand(t3, 0));
+      break;
+    case kExternalPixelArray:
+    case kExternalUnsignedByteArray:
+      __ srl(t2, key, 1);
+      __ addu(t3, a3, t2);
+      __ lbu(value, MemOperand(t3, 0));
+      break;
+    case kExternalShortArray:
+      __ addu(t3, a3, key);
+      __ lh(value, MemOperand(t3, 0));
+      break;
+    case kExternalUnsignedShortArray:
+      __ addu(t3, a3, key);
+      __ lhu(value, MemOperand(t3, 0));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ sll(t2, key, 1);
+      __ addu(t3, a3, t2);
+      __ lw(value, MemOperand(t3, 0));
+      break;
+    case kExternalFloatArray:
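+      // t2 still holds the untagged key from the bounds check, so the byte
+      // offset is index * 4.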
+      __ sll(t3, t2, 2);
+      __ addu(t3, a3, t3);
+      if (CpuFeatures::IsSupported(FPU)) {
+        CpuFeatures::Scope scope(FPU);
+        __ lwc1(f0, MemOperand(t3, 0));
+      } else {
+        __ lw(value, MemOperand(t3, 0));
+      }
+      break;
+    case kExternalDoubleArray:
+      __ sll(t2, key, 2);
+      __ addu(t3, a3, t2);
+      if (CpuFeatures::IsSupported(FPU)) {
+        CpuFeatures::Scope scope(FPU);
+        __ ldc1(f0, MemOperand(t3, 0));
+      } else {
+        // t3: pointer to the beginning of the double we want to load.
+        __ lw(a2, MemOperand(t3, 0));
+        __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // a2: value
+  // For float array type:
+  // f0: value (if FPU is supported)
+  // a2: value (if FPU is not supported)
+  // For double array type:
+  // f0: value (if FPU is supported)
+  // a2/a3: value (if FPU is not supported)
+
+  if (array_type == kExternalIntArray) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
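+    // Subtracting 0xC0000000 adds 0x40000000 modulo 2^32: the smi range
+    // [-2^30, 2^30 - 1] maps to a non-negative result and everything else
+    // to a negative one.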
+    __ Subu(t3, value, Operand(0xC0000000));  // Non-smi value gives neg result.
+    __ Branch(&box_int, lt, t3, Operand(zero_reg));
+    // Tag integer as smi and return it.
+    __ sll(v0, value, kSmiTagSize);
+    __ Ret();
+
+    __ bind(&box_int);
+    // Allocate a HeapNumber for the result and perform int-to-double
+    // conversion.
+    // The arm version uses a temporary here to save r0, but we don't need to
+    // (a0 is not modified).
+    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
+
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      __ mtc1(value, f0);
+      __ cvt_d_w(f0, f0);
+      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+      __ Ret();
+    } else {
+      Register dst1 = t2;
+      Register dst2 = t3;
+      FloatingPointHelper::Destination dest =
+          FloatingPointHelper::kCoreRegisters;
+      FloatingPointHelper::ConvertIntToDouble(masm,
+                                              value,
+                                              dest,
+                                              f0,
+                                              dst1,
+                                              dst2,
+                                              t1,
+                                              f2);
+      __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+      __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+      __ Ret();
+    }
+  } else if (array_type == kExternalUnsignedIntArray) {
+    // The test is different for unsigned int values. Since we need
+    // the value to be in the range of a positive smi, we can't
+    // handle either of the top two bits being set in the value.
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      Label pl_box_int;
+      __ And(t2, value, Operand(0xC0000000));
+      __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
+
+      // It can fit in a Smi.
+      // Tag integer as smi and return it.
+      __ sll(v0, value, kSmiTagSize);
+      __ Ret();
+
+      __ bind(&pl_box_int);
+      // Allocate a HeapNumber for the result and perform int-to-double
+      // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
+      // registers - also when jumping due to exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
+
+      // The Cvt_d_uw macro below replaces this sequence:
+      // __ mtc1(value, f0);     // LS 32 bits.
+      // __ mtc1(zero_reg, f1);  // MS 32 bits are all zero.
+      // __ cvt_d_l(f0, f0);  // A 64-bit conversion yields the correct
+      //                      // result for unsigned 32-bit inputs.
+
+      __ Cvt_d_uw(f0, value);
+
+      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+
+      __ Ret();
+    } else {
+      // Check whether unsigned integer fits into smi.
+      Label box_int_0, box_int_1, done;
+      __ And(t2, value, Operand(0x80000000));
+      __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
+      __ And(t2, value, Operand(0x40000000));
+      __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
+
+      // Tag integer as smi and return it.
+      __ sll(v0, value, kSmiTagSize);
+      __ Ret();
+
+      Register hiword = value;  // a2.
+      Register loword = a3;
+
+      __ bind(&box_int_0);
+      // Integer does not have leading zeros.
+      GenerateUInt2Double(masm, hiword, loword, t0, 0);
+      __ Branch(&done);
+
+      __ bind(&box_int_1);
+      // Integer has one leading zero.
+      GenerateUInt2Double(masm, hiword, loword, t0, 1);
+
+      __ bind(&done);
+      // Integer was converted to double in registers hiword:loword.
+      // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
+      // clobbers all registers - also when jumping due to exhausted young
+      // space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
+
+      __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
+      __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
+
+      __ mov(v0, t2);
+      __ Ret();
+    }
+  } else if (array_type == kExternalFloatArray) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // The single-precision value is already in FPU register f0.
+      __ cvt_d_s(f0, f0);
+      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // FPU is not available, do manual single to double conversion.
+
+      // a2: floating point value (binary32).
+      // v0: heap number for result
+
+      // Extract mantissa to t4.
+      __ And(t4, value, Operand(kBinary32MantissaMask));
+
+      // Extract exponent to t5.
+      __ srl(t5, value, kBinary32MantissaBits);
+      __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+      Label exponent_rebiased;
+      __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
+
+      __ li(t0, 0x7ff);
+      __ Xor(t1, t5, Operand(0xFF));
+      __ movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
+      __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));
+
+      // Rebias exponent.
+      __ Addu(t5,
+              t5,
+              Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+      __ bind(&exponent_rebiased);
+      __ And(a2, value, Operand(kBinary32SignMask));
+      value = no_reg;
+      __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
+      __ or_(a2, a2, t0);
+
+      // Shift mantissa.
+      static const int kMantissaShiftForHiWord =
+          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+      static const int kMantissaShiftForLoWord =
+          kBitsPerInt - kMantissaShiftForHiWord;
+
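+      // binary32 has 23 mantissa bits while the high word of a binary64
+      // holds only 20, so the top 20 bits go into the high word and the
+      // remaining 3 bits into the top of the low word.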
+      __ srl(t0, t4, kMantissaShiftForHiWord);
+      __ or_(a2, a2, t0);
+      __ sll(a0, t4, kMantissaShiftForLoWord);
+
+      __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+      __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+      __ Ret();
+    }
+  } else if (array_type == kExternalDoubleArray) {
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // The double value is already in f0.
+      __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+
+      __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+      __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+      __ Ret();
+    }
+  } else {
+    // Tag integer as smi and return it.
+    __ sll(v0, value, kSmiTagSize);
+    __ Ret();
+  }
+
+  // Slow case, key and receiver still in a0 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, a2, a3);
+
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+
+  __ Push(a1, a0);
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label slow, check_heap_number, miss_force_generic;
+
+  // Register usage.
+  Register value = a0;
+  Register key = a1;
+  Register receiver = a2;
+  // a3 mostly holds the elements array or the destination external array.
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &miss_force_generic);
+
+  // Check that the index is in range.
+  __ SmiUntag(t0, key);
+  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ Branch(&miss_force_generic, Ugreater_equal, t0, Operand(t1));
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // a3: external array.
+  // t0: key (integer).
+
+  if (array_type == kExternalPixelArray) {
+    // Double to pixel conversion is only implemented in the runtime for now.
+    __ JumpIfNotSmi(value, &slow);
+  } else {
+    __ JumpIfNotSmi(value, &check_heap_number);
+  }
+  __ SmiUntag(t1, value);
+  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+  // a3: base pointer of external storage.
+  // t0: key (integer).
+  // t1: value (integer).
+
+  switch (array_type) {
+    case kExternalPixelArray: {
+      // Clamp the value to [0..255].
+      // v0 is used as a scratch register here.
+      Label done;
+      __ li(v0, Operand(255));
+      // Normal branch: nop in delay slot.
+      __ Branch(&done, gt, t1, Operand(v0));
+      // Use delay slot in this branch.
+      __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
+      __ mov(v0, zero_reg);  // In delay slot.
+      __ mov(v0, t1);  // Value is in range 0..255.
+      __ bind(&done);
+      __ mov(t1, v0);
+      __ addu(t8, a3, t0);
+      __ sb(t1, MemOperand(t8, 0));
+      }
+      break;
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+      __ addu(t8, a3, t0);
+      __ sb(t1, MemOperand(t8, 0));
+      break;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      __ sll(t8, t0, 1);
+      __ addu(t8, a3, t8);
+      __ sh(t1, MemOperand(t8, 0));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ sll(t8, t0, 2);
+      __ addu(t8, a3, t8);
+      __ sw(t1, MemOperand(t8, 0));
+      break;
+    case kExternalFloatArray:
+      // Perform int-to-float conversion and store to memory.
+      StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
+      break;
+    case kExternalDoubleArray:
+      __ sll(t8, t0, 3);
+      __ addu(a3, a3, t8);
+      // a3: effective address of the double element.
+      FloatingPointHelper::Destination destination;
+      if (CpuFeatures::IsSupported(FPU)) {
+        destination = FloatingPointHelper::kFPURegisters;
+      } else {
+        destination = FloatingPointHelper::kCoreRegisters;
+      }
+      FloatingPointHelper::ConvertIntToDouble(
+          masm, t1, destination,
+          f0, t2, t3,  // These are: double_dst, dst1, dst2.
+          t0, f2);  // These are: scratch2, single_scratch.
+      if (destination == FloatingPointHelper::kFPURegisters) {
+        CpuFeatures::Scope scope(FPU);
+        __ sdc1(f0, MemOperand(a3, 0));
+      } else {
+        __ sw(t2, MemOperand(a3, 0));
+        __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
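+  // Each case above computes the element address as
+  //   base + (key << log2(element_size)),
+  // e.g. a 4-byte int element lives at a3 + (t0 << 2).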
+
+  // Entry registers are intact; a0 holds the value, which is also the
+  // return value.
+  __ mov(v0, value);
+  __ Ret();
+
+  if (array_type != kExternalPixelArray) {
+    // a3: external array.
+    // t0: key (integer).
+    __ bind(&check_heap_number);
+    __ GetObjectType(value, t1, t2);
+    __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
+
+    __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+    // a3: base pointer of external storage.
+    // t0: key (integer).
+
+    // The WebGL specification leaves the behavior of storing NaN and
+    // +/-Infinity into integer arrays basically undefined. For more
+    // reproducible behavior, convert these to zero.
+
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+
+      __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
+
+      if (array_type == kExternalFloatArray) {
+        __ cvt_s_d(f0, f0);
+        __ sll(t8, t0, 2);
+        __ addu(t8, a3, t8);
+        __ swc1(f0, MemOperand(t8, 0));
+      } else if (array_type == kExternalDoubleArray) {
+        __ sll(t8, t0, 3);
+        __ addu(t8, a3, t8);
+        __ sdc1(f0, MemOperand(t8, 0));
+      } else {
+        Label done;
+
+        // Need to perform float-to-int conversion.
+        // Test whether the exponent equals 0x7FF (infinity or NaN).
+
+        __ mfc1(t3, f1);  // Move exponent word of double to t3 (as raw bits).
+        __ li(t1, Operand(0x7FF00000));
+        __ And(t3, t3, Operand(t1));
+        __ Branch(USE_DELAY_SLOT, &done, eq, t3, Operand(t1));
+        __ mov(t3, zero_reg);  // In delay slot.
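+        // This tests the 11-bit IEEE-754 exponent field in the high word;
+        // in C terms, roughly: if ((hi & 0x7FF00000) == 0x7FF00000) t3 = 0;
+        // (an all-ones exponent means NaN or an infinity).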
+
+        // Not infinity or NaN; simply convert to int.
+        if (IsElementTypeSigned(array_type)) {
+          __ trunc_w_d(f0, f0);
+          __ mfc1(t3, f0);
+        } else {
+          __ Trunc_uw_d(f0, t3);
+        }
+
+        // t3: HeapNumber converted to integer.
+        __ bind(&done);
+        switch (array_type) {
+          case kExternalByteArray:
+          case kExternalUnsignedByteArray:
+            __ addu(t8, a3, t0);
+            __ sb(t3, MemOperand(t8, 0));
+            break;
+          case kExternalShortArray:
+          case kExternalUnsignedShortArray:
+            __ sll(t8, t0, 1);
+            __ addu(t8, a3, t8);
+            __ sh(t3, MemOperand(t8, 0));
+            break;
+          case kExternalIntArray:
+          case kExternalUnsignedIntArray:
+            __ sll(t8, t0, 2);
+            __ addu(t8, a3, t8);
+            __ sw(t3, MemOperand(t8, 0));
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+
+      // Entry registers are intact; a0 holds the value, which is also the
+      // return value.
+      __ mov(v0, value);
+      __ Ret();
+    } else {
+      // FPU is not available, so do the conversions manually.
+
+      __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
+      __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+      if (array_type == kExternalFloatArray) {
+        Label done, nan_or_infinity_or_zero;
+        static const int kMantissaInHiWordShift =
+            kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+        static const int kMantissaInLoWordShift =
+            kBitsPerInt - kMantissaInHiWordShift;
+
+        // Test for all special exponent values: zeros, subnormal numbers, NaNs
+        // and infinities. All these should be converted to 0.
+        __ li(t5, HeapNumber::kExponentMask);
+        __ and_(t6, t3, t5);
+        __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
+
+        __ xor_(t1, t6, t5);
+        __ li(t2, kBinary32ExponentMask);
+        __ movz(t6, t2, t1);  // Only if t6 is equal to t5.
+        __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5));
+
+        // Rebias exponent.
+        __ srl(t6, t6, HeapNumber::kExponentShift);
+        __ Addu(t6,
+                t6,
+                Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+        __ li(t1, Operand(kBinary32MaxExponent));
+        __ Slt(t1, t1, t6);
+        __ And(t2, t3, Operand(HeapNumber::kSignMask));
+        __ Or(t2, t2, Operand(kBinary32ExponentMask));
+        __ movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
+        __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
+
+        __ Slt(t1, t6, Operand(kBinary32MinExponent));
+        __ And(t2, t3, Operand(HeapNumber::kSignMask));
+        __ movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
+        __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
+
+        __ And(t7, t3, Operand(HeapNumber::kSignMask));
+        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+        __ sll(t3, t3, kMantissaInHiWordShift);
+        __ or_(t7, t7, t3);
+        __ srl(t4, t4, kMantissaInLoWordShift);
+        __ or_(t7, t7, t4);
+        __ sll(t6, t6, kBinary32ExponentShift);
+        __ or_(t3, t7, t6);
+
+        __ bind(&done);
+        __ sll(t9, t0, 2);
+        __ addu(t9, a3, t9);
+        __ sw(t3, MemOperand(t9, 0));
+
+        // Entry registers are intact; a0 holds the value, which is also
+        // the return value.
+        __ mov(v0, value);
+        __ Ret();
+
+        __ bind(&nan_or_infinity_or_zero);
+        __ And(t7, t3, Operand(HeapNumber::kSignMask));
+        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+        __ or_(t6, t6, t7);
+        __ sll(t3, t3, kMantissaInHiWordShift);
+        __ or_(t6, t6, t3);
+        __ srl(t4, t4, kMantissaInLoWordShift);
+        __ or_(t3, t6, t4);
+        __ Branch(&done);
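+        // In C terms, the single-precision repacking above is roughly
+        // (hi/lo being the raw high and low words of the double):
+        //   uint32_t sign = hi & kSignMask;
+        //   uint32_t exp  = ((hi >> 20) & 0x7FF) - 1023 + 127;  // Rebias.
+        //   uint32_t man  = ((hi & kMantissaMask) << 3) | (lo >> 29);
+        //   t3 = sign | (exp << 23) | man;
+        // with out-of-range exponents saturated to infinity or zero above.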
+      } else if (array_type == kExternalDoubleArray) {
+        __ sll(t8, t0, 3);
+        __ addu(t8, a3, t8);
+        // t8: effective address of destination element.
+        __ sw(t4, MemOperand(t8, 0));
+        __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+        __ mov(v0, value);
+        __ Ret();
+      } else {
+        bool is_signed_type = IsElementTypeSigned(array_type);
+        int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+        int32_t min_value   = is_signed_type ? 0x80000000 : 0x00000000;
+
+        Label done, sign;
+
+        // Test for all special exponent values: zeros, subnormal numbers, NaNs
+        // and infinities. All these should be converted to 0.
+        __ li(t5, HeapNumber::kExponentMask);
+        __ and_(t6, t3, t5);
+        __ movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
+        __ Branch(&done, eq, t6, Operand(zero_reg));
+
+        __ xor_(t2, t6, t5);
+        __ movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
+        __ Branch(&done, eq, t6, Operand(t5));
+
+        // Unbias exponent.
+        __ srl(t6, t6, HeapNumber::kExponentShift);
+        __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
+        // If the exponent is negative, the result is 0.
+        __ slt(t2, t6, zero_reg);
+        __ movn(t3, zero_reg, t2);  // Only if exponent is negative.
+        __ Branch(&done, lt, t6, Operand(zero_reg));
+
+        // If the exponent is too big, the result is the minimal value.
+        __ slti(t1, t6, meaningful_bits - 1);
+        __ li(t2, min_value);
+        __ movz(t3, t2, t1);  // Only if t6 is ge meaningful_bits - 1.
+        __ Branch(&done, ge, t6, Operand(meaningful_bits - 1));
+
+        __ And(t5, t3, Operand(HeapNumber::kSignMask));
+        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+        __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+        __ li(t9, HeapNumber::kMantissaBitsInTopWord);
+        __ subu(t6, t9, t6);
+        __ slt(t1, t6, zero_reg);
+        __ srlv(t2, t3, t6);
+        __ movz(t3, t2, t1);  // Only if t6 is non-negative.
+        __ Branch(&sign, ge, t6, Operand(zero_reg));
+
+        __ subu(t6, zero_reg, t6);
+        __ sllv(t3, t3, t6);
+        __ li(t9, meaningful_bits);
+        __ subu(t6, t9, t6);
+        __ srlv(t4, t4, t6);
+        __ or_(t3, t3, t4);
+
+        __ bind(&sign);
+        __ subu(t2, zero_reg, t3);  // t2 = -t3.
+        __ movn(t3, t2, t5);  // Negate only if the sign bit (t5) is set.
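+        // In C terms the conversion above is roughly (hi = high word):
+        //   int e = ((hi >> 20) & 0x7FF) - 1023;
+        //   uint32_t m = (hi & kMantissaMask) | (1u << 20);  // Hidden bit.
+        //   t3 = (e <= 20) ? m >> (20 - e)
+        //                  : (m << (e - 20)) |
+        //                    (lo >> (meaningful_bits - (e - 20)));
+        //   if (hi & kSignMask) t3 = -t3;
+        // NaN, infinity, negative and too-large exponents are handled by
+        // the guards above.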
+
+        __ bind(&done);
+
+        // Result is in t3.
+        // This switch block should be exactly the same as above (FPU mode).
+        switch (array_type) {
+          case kExternalByteArray:
+          case kExternalUnsignedByteArray:
+            __ addu(t8, a3, t0);
+            __ sb(t3, MemOperand(t8, 0));
+            break;
+          case kExternalShortArray:
+          case kExternalUnsignedShortArray:
+            __ sll(t8, t0, 1);
+            __ addu(t8, a3, t8);
+            __ sh(t3, MemOperand(t8, 0));
+            break;
+          case kExternalIntArray:
+          case kExternalUnsignedIntArray:
+            __ sll(t8, t0, 2);
+            __ addu(t8, a3, t8);
+            __ sw(t3, MemOperand(t8, 0));
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+
+  // Slow case: value, key and receiver still in a0, a1 and a2.
+  __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, t0, t1);
+  // Entry registers are intact.
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  // -----------------------------------
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  // Miss case: tail-jump to the force-generic miss handler.
+  __ bind(&miss_force_generic);
+
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  // -----------------------------------
+
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(a0, &miss_force_generic);
+
+  // Get the elements array.
+  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ AssertFastElements(a2);
+
+  // Check that the key is within bounds.
+  __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ Branch(&miss_force_generic, hs, a0, Operand(a3));
+
+  // Load the result and make sure it's not the hole.
+  __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
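+  // The key is a smi (its value shifted left by kSmiTagSize), so shifting
+  // by kPointerSizeLog2 - kSmiTagSize scales it to a byte offset; e.g.
+  // key 3 is the tagged word 6, and 6 << 1 == 12 == 3 * kPointerSize.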
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, t0, a3);
+  __ lw(t0, MemOperand(t0));
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+  __ Branch(&miss_force_generic, eq, t0, Operand(t1));
+  __ mov(v0, t0);
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  Label miss_force_generic;
+
+  Register value_reg = a0;
+  Register key_reg = a1;
+  Register receiver_reg = a2;
+  Register scratch = a3;
+  Register elements_reg = t0;
+  Register scratch2 = t1;
+  Register scratch3 = t2;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(a0, &miss_force_generic);
+
+  // Get the elements array and check that it is a fast element array,
+  // not a copy-on-write (COW) array.
+  __ lw(elements_reg,
+        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis.
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
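+  // Both key and length are smis, so the tagged values compare directly;
+  // 'hs' (unsigned >=) also rejects keys with the sign bit set.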
+
+  __ Addu(scratch,
+          elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(scratch3, scratch2, scratch);
+  __ sw(value_reg, MemOperand(scratch3));
+  __ RecordWrite(scratch, Operand(scratch2), receiver_reg, elements_reg);
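+  // The write barrier records the store of a (possibly heap-allocated)
+  // value into the elements array for the GC.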
+
+  // value_reg (a0) is preserved.
+  // Done.
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
 
 
diff --git a/src/mips/virtual-frame-mips-inl.h b/src/mips/virtual-frame-mips-inl.h
deleted file mode 100644
index f0d2fab..0000000
--- a/src/mips/virtual-frame-mips-inl.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_MIPS_INL_H_
-#define V8_VIRTUAL_FRAME_MIPS_INL_H_
-
-#include "assembler-mips.h"
-#include "virtual-frame-mips.h"
-
-namespace v8 {
-namespace internal {
-
-
-MemOperand VirtualFrame::ParameterAt(int index) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-// The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-void VirtualFrame::Forget(int count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_MIPS_INL_H_
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
deleted file mode 100644
index 22fe9f0..0000000
--- a/src/mips/virtual-frame-mips.cc
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToA1A0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA1() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected,
-                           Condition cond,
-                           Register r1,
-                           const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected,
-                           Condition cond,
-                           Register r1,
-                           const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTOSTo(
-    VirtualFrame::TopOfStack expected_top_of_stack_state,
-    Condition cond,
-    Register r1,
-    const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Enter() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Exit() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  UNIMPLEMENTED_MIPS();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                 InvokeJSFlags flags,
-                                 int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedStoreIC() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
-                                  RelocInfo::Mode rmode,
-                                  int dropped_args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-//    NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS.
-const bool VirtualFrame::kA0InUse[TOS_STATES] =
-    { false,            true,   false,  true,      true };
-const bool VirtualFrame::kA1InUse[TOS_STATES] =
-    { false,            false,  true,   true,      true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
-    { 0,                1,      1,      2,         2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
-    { a0,               a0,     a1,     a1,        a0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
-    { a0,               a0,     a1,     a0,        a1 };
-const Register VirtualFrame::kAllocatedRegisters[
-    VirtualFrame::kNumberOfAllocatedRegisters] = { a2, a3, t0, t1, t2 };
-// Popping is done by the transition implied by kStateAfterPop.  Of course if
-// there were no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
-    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, A0_TOS, A1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush.  Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
-    { A0_TOS, A1_A0_TOS, A0_A1_TOS, A0_A1_TOS, A1_A0_TOS };
-
-
-void VirtualFrame::Drop(int count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Pop() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1A0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::Peek() {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-Register VirtualFrame::Peek2() {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-void VirtualFrame::Dup() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Dup2() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPop(RegList regs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPush(RegList regs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPushReversed(RegList regs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAll() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
deleted file mode 100644
index cf30b09..0000000
--- a/src/mips/virtual-frame-mips.h
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
-  class RegisterAllocationScope;
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled. The constructor spills the code
-  // generator's current frame, and keeps it spilled.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    explicit SpilledScope(VirtualFrame* frame)
-      : old_is_spilled_(
-          Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
-      if (frame != NULL) {
-        if (!old_is_spilled_) {
-          frame->SpillAll();
-        } else {
-          frame->AssertIsSpilled();
-        }
-      }
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
-    }
-    ~SpilledScope() {
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
-          old_is_spilled_);
-    }
-    static bool is_spilled() {
-      return Isolate::Current()->is_virtual_frame_in_spilled_scope();
-    }
-
-   private:
-    int old_is_spilled_;
-
-    SpilledScope() {}
-
-    friend class RegisterAllocationScope;
-  };
-
-  class RegisterAllocationScope BASE_EMBEDDED {
-   public:
-    // A utility class to introduce a scope where the virtual frame
-    // is not spilled, ie. where register allocation occurs.  Eventually
-    // when RegisterAllocationScope is ubiquitous it can be removed
-    // along with the (by then unused) SpilledScope class.
-    inline explicit RegisterAllocationScope(CodeGenerator* cgen);
-    inline ~RegisterAllocationScope();
-
-   private:
-    CodeGenerator* cgen_;
-    bool old_is_spilled_;
-
-    RegisterAllocationScope() {}
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct an invalid virtual frame, used by JumpTargets.
-  explicit inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  inline CodeGenerator* cgen() const;
-  inline MacroAssembler* masm();
-
-  // The number of elements on the virtual frame.
-  int element_count() const { return element_count_; }
-
-  // The height of the virtual expression stack.
-  inline int height() const;
-
-  bool is_used(int num) {
-    switch (num) {
-      case 0: {  // a0.
-        return kA0InUse[top_of_stack_state_];
-      }
-      case 1: {  // a1.
-        return kA1InUse[top_of_stack_state_];
-      }
-      case 2:
-      case 3:
-      case 4:
-      case 5:
-      case 6: {  // a2 to a3, t0 to t2.
-        ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
-        ASSERT(num >= kFirstAllocatedRegister);
-        if ((register_allocation_map_ &
-             (1 << (num - kFirstAllocatedRegister))) == 0) {
-          return false;
-        } else {
-          return true;
-        }
-      }
-      default: {
-        ASSERT(num < kFirstAllocatedRegister ||
-               num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
-        return false;
-      }
-    }
-  }
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (eg, the frame after an exception handler is pushed). No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget elements from the top of the frame to match an actual frame (eg,
-  // the frame after a runtime call). No code is emitted except to bring the
-  // frame to a spilled state.
-  void Forget(int count);
-
-
-  // Spill all values from the frame to memory.
-  void SpillAll();
-
-  void AssertIsSpilled() const {
-    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
-    ASSERT(register_allocation_map_ == 0);
-  }
-
-  void AssertIsNotSpilled() {
-    ASSERT(!SpilledScope::is_spilled());
-  }
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    UNIMPLEMENTED();
-  }
-
-  // Spill all occurrences of an arbitrary register if possible. Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (ie, they all have frame-external references). Unimplemented.
-  Register SpillAnyRegister();
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame. As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(const VirtualFrame* expected,
-               Condition cond = al,
-               Register r1 = no_reg,
-               const Operand& r2 = Operand(no_reg));
-
-  void MergeTo(VirtualFrame* expected,
-               Condition cond = al,
-               Register r1 = no_reg,
-               const Operand& r2 = Operand(no_reg));
-
-  // Checks whether this frame can be branched to by the other frame.
-  bool IsCompatibleWith(const VirtualFrame* other) const {
-    return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
-  }
-
-  inline void ForgetTypeInfo() {
-    tos_known_smi_map_ = 0;
-  }
-
-  // Detach a frame from its code generator, perhaps temporarily. This
-  // tells the register allocator that it is free to use frame-internal
-  // registers. Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-  }
-
-  // (Re)attach a frame to its code generator. This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences. After
-  // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used. Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by elements in the virtual frame.
-  // This avoids generating unnecessary merge code when jumping to the shared
-  // return site. No spill code emitted. Value to return should be in v0.
-  inline void PrepareForReturn();
-
-  // Number of local variables after when we use a loop for allocating.
-  static const int kLocalVarBound = 5;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // The current top of the expression stack as an assembly operand.
-  MemOperand Top() {
-    AssertIsSpilled();
-    return MemOperand(sp, 0);
-  }
-
-  // An element of the expression stack as an assembly operand.
-  MemOperand ElementAt(int index) {
-    int adjusted_index = index - kVirtualElements[top_of_stack_state_];
-    ASSERT(adjusted_index >= 0);
-    return MemOperand(sp, adjusted_index * kPointerSize);
-  }
-
-  bool KnownSmiAt(int index) {
-    if (index >= kTOSKnownSmiMapSize) return false;
-    return (tos_known_smi_map_ & (1 << index)) != 0;
-  }
-  // A frame-allocated local as an assembly operand.
-  inline MemOperand LocalAt(int index);
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // The function frame slot.
-  MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
-  // The context frame slot.
-  MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
-  // A parameter as an assembly operand.
-  inline MemOperand ParameterAt(int index);
-
-  // The receiver frame slot.
-  inline MemOperand Receiver();
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline void CallStub(CodeStub* stub, int arg_count);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  void CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  void CallRuntime(const Runtime::Function* f, int arg_count);
-  void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flag,
-                     int arg_count);
-
-  // Call load IC. Receiver is on the stack and is consumed. Result is returned
-  // in v0.
-  void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
-  // Call store IC. If the load is contextual, value is found on top of the
-  // frame. If not, value and receiver are on the frame. Both are consumed.
-  // Result is returned in v0.
-  void CallStoreIC(Handle<String> name, bool is_contextual);
-
-  // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
-  // Result is returned in v0.
-  void CallKeyedLoadIC();
-
-  // Call keyed store IC. Value, key and receiver are on the stack. All three
-  // are consumed. Result is returned in v0 (and a0).
-  void CallKeyedStoreIC();
-
-  // Call into an IC stub given the number of arguments it removes
-  // from the stack. Register arguments to the IC stub are implicit,
-  // and depend on the type of IC stub.
-  void CallCodeObject(Handle<Code> ic,
-                      RelocInfo::Mode rmode,
-                      int dropped_args);
-
-  // Drop a number of elements from the top of the expression stack. May
-  // emit code to affect the physical frame. Does not clobber any registers
-  // excepting possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() { Drop(1); }
-
-  // Pop an element from the top of the expression stack. Discards
-  // the result.
-  void Pop();
-
-  // Pop an element from the top of the expression stack.  The register
-  // will be one normally used for the top of stack register allocation
-  // so you can't hold on to it if you push on the stack.
-  Register PopToRegister(Register but_not_to_this_one = no_reg);
-
-  // Look at the top of the stack.  The register returned is aliased and
-  // must be copied to a scratch register before modification.
-  Register Peek();
-
-  // Look at the value beneath the top of the stack. The register returned is
-  // aliased and must be copied to a scratch register before modification.
-  Register Peek2();
-
-  // Duplicate the top of stack.
-  void Dup();
-
-  // Duplicate the two elements on top of stack.
-  void Dup2();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in a0.
-  void SpillAllButCopyTOSToA0();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in a1.
-  void SpillAllButCopyTOSToA1();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in a1
-  // and the next value on the stack in a0.
-  void SpillAllButCopyTOSToA1A0();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-  // Same but for multiple registers
-  void EmitMultiPop(RegList regs);
-  void EmitMultiPopReversed(RegList regs);
-
-
-  // Takes the top two elements and puts them in a0 (top element) and a1
-  // (second element).
-  void PopToA1A0();
-
-  // Takes the top element and puts it in a1.
-  void PopToA1();
-
-  // Takes the top element and puts it in a0.
-  void PopToA0();
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPushRoot(Heap::RootListIndex index);
-
-  // Overwrite the nth thing on the stack.  If the nth position is in a
-  // register then this turns into a Move, otherwise an sw.  Afterwards
-  // you can still use the register even if it is a register that can be
-  // used for TOS (a0 or a1).
-  void SetElementAt(Register reg, int this_far_down);
-
-  // Get a register which is free and which must be immediately used to
-  // push on the top of the stack.
-  Register GetTOSRegister();
-
-  // Same but for multiple registers.
-  void EmitMultiPush(RegList regs);
-  void EmitMultiPushReversed(RegList regs);
-
-  static Register scratch0() { return t4; }
-  static Register scratch1() { return t5; }
-  static Register scratch2() { return t6; }
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  // 5 states for the top of stack, which can be in memory or in a0 and a1.
-  enum TopOfStack { NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS,
-                    TOS_STATES};
-  static const int kMaxTOSRegisters = 2;
-
-  static const bool kA0InUse[TOS_STATES];
-  static const bool kA1InUse[TOS_STATES];
-  static const int kVirtualElements[TOS_STATES];
-  static const TopOfStack kStateAfterPop[TOS_STATES];
-  static const TopOfStack kStateAfterPush[TOS_STATES];
-  static const Register kTopRegister[TOS_STATES];
-  static const Register kBottomRegister[TOS_STATES];
-
-  // We allocate up to 5 locals in registers.
-  static const int kNumberOfAllocatedRegisters = 5;
-  // r2 to r6 are allocated to locals.
-  static const int kFirstAllocatedRegister = 2;
-
-  static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
-  static Register AllocatedRegister(int r) {
-    ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
-    return kAllocatedRegisters[r];
-  }
-
-  // The number of elements on the stack frame.
-  int element_count_;
-  TopOfStack top_of_stack_state_:3;
-  int register_allocation_map_:kNumberOfAllocatedRegisters;
-  static const int kTOSKnownSmiMapSize = 4;
-  unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the sp register).  For now since everything is in memory it is given
-  // by the number of elements on the not-very-virtual stack frame.
-  int stack_pointer() { return element_count_ - 1; }
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count() const;
-  inline int local_count() const;
-
-  // The index of the element that is at the processor's frame pointer
-  // (the fp register). The parameters, receiver, function, and context
-  // are below the frame pointer.
-  inline int frame_pointer() const;
-
-  // The index of the first parameter. The receiver lies below the first
-  // parameter.
-  int param0_index() { return 1; }
-
-  // The index of the context slot in the frame. It is immediately
-  // below the frame pointer.
-  inline int context_index();
-
-  // The index of the function slot in the frame. It is below the frame
-  // pointer and context slot.
-  inline int function_index();
-
-  // The index of the first local. Between the frame pointer and the
-  // locals lies the return address.
-  inline int local0_index() const;
-
-  // The index of the base of the expression stack.
-  inline int expression_base_index() const;
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  inline int fp_relative(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame. Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
-
-  // If all top-of-stack registers are in use then the lowest one is pushed
-  // onto the physical stack and made free.
-  void EnsureOneFreeTOSRegister();
-
-  // Emit instructions to get the top of stack state from where we are to where
-  // we want to be.
-  void MergeTOSTo(TopOfStack expected_state,
-                  Condition cond = al,
-                  Register r1 = no_reg,
-                  const Operand& r2 = Operand(no_reg));
-
-  inline bool Equals(const VirtualFrame* other);
-
-  inline void LowerHeight(int count) {
-    element_count_ -= count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = 0;
-    } else {
-      tos_known_smi_map_ >>= count;
-    }
-  }
-
-  inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
-    ASSERT(known_smi_map < (1u << count));
-    element_count_ += count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = known_smi_map;
-    } else {
-      tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
-    }
-  }
-  friend class JumpTarget;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-