Update V8 to r7427: Initial merge by git

As required by WebKit r82507

Change-Id: I7ae83ef3f689356043b4929255b7c1dd31d8c5df
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 2e63461..f7453d1 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -38,21 +38,13 @@
 
 #include "mips/assembler-mips.h"
 #include "cpu.h"
+#include "debug.h"
 
 
 namespace v8 {
 namespace internal {
 
 // -----------------------------------------------------------------------------
-// Condition
-
-Condition NegateCondition(Condition cc) {
-  ASSERT(cc != cc_always);
-  return static_cast<Condition>(cc ^ 1);
-}
-
-
-// -----------------------------------------------------------------------------
 // Operand and MemOperand
 
 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
@@ -61,17 +53,13 @@
   rmode_ = rmode;
 }
 
+
 Operand::Operand(const ExternalReference& f)  {
   rm_ = no_reg;
   imm32_ = reinterpret_cast<int32_t>(f.address());
   rmode_ = RelocInfo::EXTERNAL_REFERENCE;
 }
 
-Operand::Operand(const char* s) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(s);
-  rmode_ = RelocInfo::EMBEDDED_STRING;
-}
 
 Operand::Operand(Smi* value) {
   rm_ = no_reg;
@@ -79,10 +67,12 @@
   rmode_ = RelocInfo::NONE;
 }
 
+
 Operand::Operand(Register rm) {
   rm_ = rm;
 }
 
+
 bool Operand::is_reg() const {
   return rm_.is_valid();
 }
@@ -105,8 +95,29 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  return reinterpret_cast<Address>(pc_);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+                              || rmode_ == EMBEDDED_OBJECT
+                              || rmode_ == EXTERNAL_REFERENCE);
+  // Read the address of the word containing the target_address in an
+  // instruction stream.
+  // The only architecture-independent user of this function is the serializer.
+  // The serializer uses it to find out how many raw bytes of instruction to
+  // output before the next target.
+  // For instructions like LUI/ORI, where the target bits are mixed into the
+  // instruction bits, the size of the target will be zero, indicating that the
+  // serializer should not step forward in memory after a target is resolved
+  // and written.  In this case the target_address_address function should
+  // return the end of the instructions to be patched, allowing the
+  // deserializer to deserialize the instructions as raw bytes and put them in
+  // place, ready to be patched with the target. In our case, that is the
+  // address of the instruction that follows the LUI/ORI instruction pair.
+  return reinterpret_cast<Address>(
+    pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+}
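Note: for readers new to this pattern, a minimal sketch of how a 32-bit target is split across the lui/ori pair described above (illustrative C++ only, not part of the patch; names are made up):

    #include <stdint.h>

    // lui writes the upper 16 bits of `at`; ori then ORs in the lower 16 bits.
    uint32_t LuiImmediate(uint32_t target) { return target >> 16; }
    uint32_t OriImmediate(uint32_t target) { return target & 0xffff; }

    // Reading the pair back recovers the constant exactly, which is why the
    // serializer can treat the pair as a zero-size target slot.
    uint32_t RecoverTarget(uint32_t lui_imm, uint32_t ori_imm) {
      return (lui_imm << 16) | ori_imm;
    }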
+
+
+int RelocInfo::target_address_size() {
+  return Assembler::kExternalTargetSize;
 }
 
 
@@ -130,8 +141,15 @@
 
 
 Object** RelocInfo::target_object_address() {
+  // Provide a "natural pointer" to the embedded object,
+  // which can be dereferenced during heap iteration.
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object**>(pc_);
+  // TODO(mips): Commenting out, to simplify arch-independent changes.
+  // GC won't work like this, but this commit is for asm/disasm/sim.
+  // reconstructed_obj_ptr_ =
+  //   reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+  // return &reconstructed_obj_ptr_;
+  return NULL;
 }
 
 
@@ -143,23 +161,55 @@
 
 Address* RelocInfo::target_reference_address() {
   ASSERT(rmode_ == EXTERNAL_REFERENCE);
-  return reinterpret_cast<Address*>(pc_);
+  // TODO(mips): Commenting out, to simplify arch-independent changes.
+  // GC won't work like this, but this commit is for asm/disasm/sim.
+  // reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+  // return &reconstructed_adr_ptr_;
+  return NULL;
+}
+
+
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<JSGlobalPropertyCell>(
+      reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  Object* object = HeapObject::FromAddress(
+      address - JSGlobalPropertyCell::kValueOffset);
+  return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+  Memory::Address_at(pc_) = address;
 }
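Note: the three cell accessors above share one invariant: pc_ holds the address of the cell's value slot, not of the cell itself. A minimal sketch of the round trip, with a hypothetical kValueOffset:

    #include <stdint.h>

    const int kValueOffset = 8;  // Hypothetical offset of the cell's value field.

    uint8_t* ValueSlotOf(uint8_t* cell_address) {
      return cell_address + kValueOffset;   // What set_target_cell() stores.
    }
    uint8_t* CellFromValueSlot(uint8_t* value_slot) {
      return value_slot - kValueOffset;     // What target_cell() recovers.
    }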
 
 
 Address RelocInfo::call_address() {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  // The pc_ offset of 0 assumes mips patched return sequence per
+  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+  return Assembler::target_address_at(pc_);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  // The pc_ offset of 0 assumes mips patched return sequence per
+  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+  Assembler::set_target_address_at(pc_, target);
 }
 
 
@@ -169,9 +219,8 @@
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
+  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
 }
 
@@ -182,13 +231,76 @@
 
 
 bool RelocInfo::IsPatchedReturnSequence() {
-#ifdef DEBUG
-  PrintF("%s - %d - %s : Checking for jal(r)",
-      __FILE__, __LINE__, __func__);
+  Instr instr0 = Assembler::instr_at(pc_);
+  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+  Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
+  bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
+                         (instr1 & kOpcodeMask) == ORI &&
+                         (instr2 & kOpcodeMask) == SPECIAL &&
+                         (instr2 & kFunctionFieldMask) == JALR);
+  return patched_return;
+}
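Note: IsPatchedReturnSequence() above recognizes lui/ori/jalr purely from opcode and function fields; a self-contained sketch of the jalr leg (constants restated from the MIPS encoding for illustration):

    #include <stdint.h>

    const uint32_t kOpcodeMask = 63u << 26;   // Bits 31..26.
    const uint32_t kFunctionFieldMask = 63u;  // Bits 5..0.

    bool LooksLikeJalr(uint32_t instr) {
      // SPECIAL-class instructions have opcode 0; JALR is function code 9.
      return (instr & kOpcodeMask) == 0 && (instr & kFunctionFieldMask) == 9;
    }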
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+  Instr current_instr = Assembler::instr_at(pc_);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    // RelocInfo is needed when pointer must be updated/serialized, such as
+    // UpdatingVisitor in mark-compact.cc or Serializer in serialize.cc.
+    // It is ignored by visitors that do not need it.
+    // Commenting out, to simplify arch-independent changes.
+    // GC won't work like this, but this commit is for asm/disasm/sim.
+    // visitor->VisitPointer(target_object_address(), this);
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    // RelocInfo is needed when external-references must be serialized by
+    // Serializer Visitor in serialize.cc. It is ignored by visitors that
+    // do not need it.
+    // Commenting out, to simplify arch-independent changes.
+    // Serializer won't work like this, but this commit is for asm/disasm/sim.
+    // visitor->VisitExternalReference(target_reference_address(), this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // TODO(isolates): Get a cached isolate below.
+  } else if (((RelocInfo::IsJSReturn(mode) &&
+               IsPatchedReturnSequence()) ||
+             (RelocInfo::IsDebugBreakSlot(mode) &&
+               IsPatchedDebugBreakSlotSequence())) &&
+             Isolate::Current()->debug()->has_break_points()) {
+    visitor->VisitDebugTarget(this);
 #endif
-  return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
-         (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
-          ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    StaticVisitor::VisitPointer(heap, target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    StaticVisitor::VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (heap->isolate()->debug()->has_break_points() &&
+             ((RelocInfo::IsJSReturn(mode) &&
+              IsPatchedReturnSequence()) ||
+             (RelocInfo::IsDebugBreakSlot(mode) &&
+              IsPatchedDebugBreakSlotSequence()))) {
+    StaticVisitor::VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    StaticVisitor::VisitRuntimeEntry(this);
+  }
 }
 
 
@@ -203,10 +315,18 @@
 }
 
 
+void Assembler::CheckTrampolinePoolQuick() {
+  if (pc_offset() >= next_buffer_check_) {
+    CheckTrampolinePool();
+  }
+}
+
+
 void Assembler::emit(Instr x) {
   CheckBuffer();
   *reinterpret_cast<Instr*>(pc_) = x;
   pc_ += kInstrSize;
+  CheckTrampolinePoolQuick();
 }
 
 
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index a3b316b..7d00da1 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -40,83 +40,41 @@
 #include "mips/assembler-mips-inl.h"
 #include "serialize.h"
 
-
 namespace v8 {
 namespace internal {
 
+CpuFeatures::CpuFeatures()
+    : supported_(0),
+      enabled_(0),
+      found_by_runtime_probing_(0) {
+}
 
+void CpuFeatures::Probe(bool portable) {
+  // If the compiler is allowed to use the FPU, then we can use the FPU in
+  // our code generation as well.
+#if !defined(__mips__)
+  // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
+  if (FLAG_enable_fpu) {
+    supported_ |= 1u << FPU;
+  }
+#else
+  if (portable && Serializer::enabled()) {
+    supported_ |= OS::CpuFeaturesImpliedByPlatform();
+    return;  // No features if we might serialize.
+  }
 
-const Register no_reg = { -1 };
+  if (OS::MipsCpuHasFeature(FPU)) {
+    // This implementation also sets the FPU flags if
+    // runtime detection of FPU returns true.
+    supported_ |= 1u << FPU;
+    found_by_runtime_probing_ |= 1u << FPU;
+  }
 
-const Register zero_reg = { 0 };
-const Register at = { 1 };
-const Register v0 = { 2 };
-const Register v1 = { 3 };
-const Register a0 = { 4 };
-const Register a1 = { 5 };
-const Register a2 = { 6 };
-const Register a3 = { 7 };
-const Register t0 = { 8 };
-const Register t1 = { 9 };
-const Register t2 = { 10 };
-const Register t3 = { 11 };
-const Register t4 = { 12 };
-const Register t5 = { 13 };
-const Register t6 = { 14 };
-const Register t7 = { 15 };
-const Register s0 = { 16 };
-const Register s1 = { 17 };
-const Register s2 = { 18 };
-const Register s3 = { 19 };
-const Register s4 = { 20 };
-const Register s5 = { 21 };
-const Register s6 = { 22 };
-const Register s7 = { 23 };
-const Register t8 = { 24 };
-const Register t9 = { 25 };
-const Register k0 = { 26 };
-const Register k1 = { 27 };
-const Register gp = { 28 };
-const Register sp = { 29 };
-const Register s8_fp = { 30 };
-const Register ra = { 31 };
+  if (!portable) found_by_runtime_probing_ = 0;
+#endif
+}
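Note: the feature state is just three bitmasks, one bit per CpuFeature enumerator, so `supported_ |= 1u << FPU` above marks FPU available and a query is a single mask test; roughly (a sketch, assumed to mirror IsSupported/IsEnabled):

    #include <stdint.h>

    bool HasFeature(uint32_t mask, int feature) {
      return (mask & (1u << feature)) != 0;
    }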
 
 
-const FPURegister no_creg = { -1 };
-
-const FPURegister f0 = { 0 };
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 };
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 };
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
-
 int ToNumber(Register reg) {
   ASSERT(reg.is_valid());
   const int kNumbers[] = {
@@ -156,6 +114,7 @@
   return kNumbers[reg.code()];
 }
 
+
 Register ToRegister(int num) {
   ASSERT(num >= 0 && num < kNumRegisters);
   const Register kRegisters[] = {
@@ -181,6 +140,15 @@
 
 const int RelocInfo::kApplyMask = 0;
 
+
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially coded.  Being
+  // specially coded on MIPS means that it is a lui/ori instruction, and that is
+  // always the case inside code objects.
+  return true;
+}
+
+
 // Patch the code at the current address with the supplied instructions.
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -210,7 +178,7 @@
   rm_ = no_reg;
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
-  ASSERT(!Heap::InNewSpace(obj));
+  ASSERT(!HEAP->InNewSpace(obj));
   if (obj->IsHeapObject()) {
     imm32_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;
@@ -221,26 +189,66 @@
   }
 }
 
-MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
+
+MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
   offset_ = offset;
 }
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Assembler.
+// Specific instructions, constants, and masks.
 
-static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
+static const int kNegOffset = 0x00008000;
+// addiu(sp, sp, 4), as used by a bare Pop() or as the post-increment of sp
+// in Pop(r) operations.
+const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
+      | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
+// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
+      | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
+// sw(r, MemOperand(sp, 0))
+const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
+      |  (0 & kImm16Mask);
+// lw(r, MemOperand(sp, 0))
+const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
+      |  (0 & kImm16Mask);
 
-Assembler::Assembler(void* buffer, int buffer_size) {
+const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
+      |  (0 & kImm16Mask);
+
+const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
+      |  (0 & kImm16Mask);
+
+const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
+      |  (kNegOffset & kImm16Mask);
+
+const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
+      |  (kNegOffset & kImm16Mask);
+// A mask for the Rt register for push, pop, lw, sw instructions.
+const Instr kRtMask = kRtFieldMask;
+const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrArgumentMask  = ~kLwSwInstrTypeMask;
+const Instr kLwSwOffsetMask = kImm16Mask;
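Note: to make the patterns above concrete, kPushInstruction encodes `addiu sp, sp, -4` with sp = register 29, and the `& ~kRtMask` tests below let one pattern match any pushed/popped register. A worked I-type decode (sketch, assuming the standard MIPS layout):

    #include <stdint.h>

    struct ITypeFields { uint32_t opcode, rs, rt; int16_t imm; };

    // I-type layout: opcode(6) | rs(5) | rt(5) | imm(16).
    ITypeFields DecodeIType(uint32_t instr) {
      return { instr >> 26, (instr >> 21) & 31u, (instr >> 16) & 31u,
               static_cast<int16_t>(instr & 0xffffu) };
    }
    // kPushInstruction decodes to rs == rt == 29 (sp) and imm == -4;
    // kPopRegPattern to rs == 29 and imm == 0, with rt masked out by callers.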
+
+
+// Spare buffer.
+static const int kMinimalBufferSize = 4 * KB;
+
+
+Assembler::Assembler(void* buffer, int buffer_size)
+    : AssemblerBase(Isolate::Current()),
+      positions_recorder_(this),
+      allow_peephole_optimization_(false) {
+  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
+  allow_peephole_optimization_ = FLAG_peephole_optimization;
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
-      if (spare_buffer_ != NULL) {
-        buffer = spare_buffer_;
-        spare_buffer_ = NULL;
+      if (isolate()->assembler_spare_buffer() != NULL) {
+        buffer = isolate()->assembler_spare_buffer();
+        isolate()->set_assembler_spare_buffer(NULL);
       }
     }
     if (buffer == NULL) {
@@ -263,17 +271,19 @@
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
-  current_statement_position_ = RelocInfo::kNoPosition;
-  current_position_ = RelocInfo::kNoPosition;
-  written_statement_position_ = current_statement_position_;
-  written_position_ = current_position_;
+
+  last_trampoline_pool_end_ = 0;
+  no_trampoline_pool_before_ = 0;
+  trampoline_pool_blocked_nesting_ = 0;
+  next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
 }
 
 
 Assembler::~Assembler() {
   if (own_buffer_) {
-    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
-      spare_buffer_ = buffer_;
+    if (isolate()->assembler_spare_buffer() == NULL &&
+      buffer_size_ == kMinimalBufferSize) {
+      isolate()->set_assembler_spare_buffer(buffer_);
     } else {
       DeleteArray(buffer_);
     }
@@ -282,7 +292,7 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
+  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
   // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
@@ -291,6 +301,60 @@
 }
 
 
+void Assembler::Align(int m) {
+  ASSERT(m >= 4 && IsPowerOf2(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
+  }
+}
+
+
+void Assembler::CodeTargetAlign() {
+  // No advantage to aligning branch/call targets to more than
+  // single instruction, that I am aware of.
+  Align(4);
+}
+
+
+Register Assembler::GetRt(Instr instr) {
+  Register rt;
+  rt.code_ = (instr & kRtMask) >> kRtShift;
+  return rt;
+}
+
+
+bool Assembler::IsPop(Instr instr) {
+  return (instr & ~kRtMask) == kPopRegPattern;
+}
+
+
+bool Assembler::IsPush(Instr instr) {
+  return (instr & ~kRtMask) == kPushRegPattern;
+}
+
+
+bool Assembler::IsSwRegFpOffset(Instr instr) {
+  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpOffset(Instr instr) {
+  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsSwRegFpNegOffset(Instr instr) {
+  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+          kSwRegFpNegOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpNegOffset(Instr instr) {
+  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+          kLwRegFpNegOffsetPattern);
+}
+
+
 // Labels refer to positions in the (to be) generated code.
 // There are bound, linked, and unused labels.
 //
@@ -301,14 +365,19 @@
 // to be generated; pos() is the position of the last
 // instruction using the label.
 
+// The link chain is terminated by a -1 in the instruction's offset field,
+// an otherwise illegal value (a branch to -1 is an infinite loop).
+// The instruction's 16-bit offset field addresses 32-bit words, but in
+// code it is converted to an 18-bit value addressing bytes, hence the
+// -4 value of kEndOfChain.
 
-// The link chain is terminated by a negative code position (must be aligned).
 const int kEndOfChain = -4;
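Note: the -4 falls out of the arithmetic-shift decode used by target_at() below; a worked example (sketch, mirroring the source expression):

    // An offset field of -1 (0xffff) decodes to kEndOfChain:
    int32_t instr_imm16 = 0xffff;               // 16-bit offset field of -1.
    int32_t imm18 = (instr_imm16 << 16) >> 14;  // Sign-extend, convert words
                                                // to bytes: imm18 == -4.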
 
-bool Assembler::is_branch(Instr instr) {
+
+bool Assembler::IsBranch(Instr instr) {
   uint32_t opcode   = ((instr & kOpcodeMask));
   uint32_t rt_field = ((instr & kRtFieldMask));
   uint32_t rs_field = ((instr & kRsFieldMask));
+  uint32_t label_constant = (instr & ~kImm16Mask);
   // Checks if the instruction is a branch.
   return opcode == BEQ ||
       opcode == BNE ||
@@ -320,7 +389,79 @@
       opcode == BGTZL||
       (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                             rt_field == BLTZAL || rt_field == BGEZAL)) ||
-      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
+      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
+      label_constant == 0;  // Emitted label constant in the reg-exp engine.
+}
+
+
+bool Assembler::IsNop(Instr instr, unsigned int type) {
+  // See Assembler::nop(type).
+  ASSERT(type < 32);
+  uint32_t opcode = ((instr & kOpcodeMask));
+  uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
+  uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
+  uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+
+  // nop(type) == sll(zero_reg, zero_reg, type);
+  // Technically all these values will be 0, but writing them out
+  // makes the check clearer to the reader.
+
+  bool ret = (opcode == SLL &&
+              rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+              rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+              sa == type);
+
+  return ret;
+}
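Note: since SPECIAL and SLL are both encoded as 0, a typed nop is just the sa field; a worked encoding (sketch):

    // nop(type) == sll(zero_reg, zero_reg, type): opcode, rs, rt, rd and func
    // are all zero, so only sa (bits 10..6) survives.
    uint32_t EncodeTypedNop(uint32_t type) {
      return type << 6;  // e.g. type 5 -> 0x140; type 0 is the canonical nop.
    }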
+
+
+int32_t Assembler::GetBranchOffset(Instr instr) {
+  ASSERT(IsBranch(instr));
+  return ((int16_t)(instr & kImm16Mask)) << 2;
+}
+
+
+bool Assembler::IsLw(Instr instr) {
+  return ((instr & kOpcodeMask) == LW);
+}
+
+
+int16_t Assembler::GetLwOffset(Instr instr) {
+  ASSERT(IsLw(instr));
+  return ((instr & kImm16Mask));
+}
+
+
+Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
+  ASSERT(IsLw(instr));
+
+  // We actually create a new lw instruction based on the original one.
+  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
+      | (offset & kImm16Mask);
+
+  return temp_instr;
+}
+
+
+bool Assembler::IsSw(Instr instr) {
+  return ((instr & kOpcodeMask) == SW);
+}
+
+
+Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
+  ASSERT(IsSw(instr));
+  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAddImmediate(Instr instr) {
+  return ((instr & kOpcodeMask) == ADDIU);
+}
+
+
+Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
+  ASSERT(IsAddImmediate(instr));
+  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
 }
 
 
@@ -328,16 +469,25 @@
   Instr instr = instr_at(pos);
   if ((instr & ~kImm16Mask) == 0) {
     // Emitted label constant, not part of a branch.
-    return instr - (Code::kHeaderSize - kHeapObjectTag);
+    if (instr == 0) {
+      return kEndOfChain;
+    } else {
+      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+      return (imm18 + pos);
+    }
   }
   // Check we have a branch instruction.
-  ASSERT(is_branch(instr));
+  ASSERT(IsBranch(instr));
   // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
   // the compiler uses arithmetic shifts for signed integers.
-  int32_t imm18 = ((instr &
-                    static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+  int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
 
-  return pos + kBranchPCOffset + imm18;
+  if (imm18 == kEndOfChain) {
+    // EndOfChain sentinel is returned directly, not relative to pc or pos.
+    return kEndOfChain;
+  } else {
+    return pos + kBranchPCOffset + imm18;
+  }
 }
 
 
@@ -351,7 +501,7 @@
     return;
   }
 
-  ASSERT(is_branch(instr));
+  ASSERT(IsBranch(instr));
   int32_t imm18 = target_pos - (pos + kBranchPCOffset);
   ASSERT((imm18 & 3) == 0);
 
@@ -388,10 +538,28 @@
 
 
 void Assembler::bind_to(Label* L, int pos) {
-  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
   while (L->is_linked()) {
     int32_t fixup_pos = L->pos();
-    next(L);  // call next before overwriting link with target at fixup_pos
+    int32_t dist = pos - fixup_pos;
+    next(L);  // Call next before overwriting link with target at fixup_pos.
+    if (dist > kMaxBranchOffset) {
+      do {
+        int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
+        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+        target_at_put(fixup_pos, trampoline_pos);
+        fixup_pos = trampoline_pos;
+        dist = pos - fixup_pos;
+      } while (dist > kMaxBranchOffset);
+    } else if (dist < -kMaxBranchOffset) {
+      do {
+        int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
+        ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
+        target_at_put(fixup_pos, trampoline_pos);
+        fixup_pos = trampoline_pos;
+        dist = pos - fixup_pos;
+      } while (dist < -kMaxBranchOffset);
+    }
     target_at_put(fixup_pos, pos);
   }
   L->bind_to(pos);
@@ -416,16 +584,16 @@
       ASSERT(link == kEndOfChain);
       target_at_put(fixup_pos, appendix->pos());
     } else {
-      // L is empty, simply use appendix
+      // L is empty, simply use appendix.
       *L = *appendix;
     }
   }
-  appendix->Unuse();  // appendix should not be used anymore
+  appendix->Unuse();  // Appendix should not be used anymore.
 }
 
 
 void Assembler::bind(Label* L) {
-  ASSERT(!L->is_bound());  // label can only be bound once
+  ASSERT(!L->is_bound());  // Label can only be bound once.
   bind_to(L, pc_offset());
 }
 
@@ -433,11 +601,11 @@
 void Assembler::next(Label* L) {
   ASSERT(L->is_linked());
   int link = target_at(L->pos());
-  if (link > 0) {
-    L->link_to(link);
-  } else {
-    ASSERT(link == kEndOfChain);
+  ASSERT(link > 0 || link == kEndOfChain);
+  if (link == kEndOfChain) {
     L->Unuse();
+  } else if (link > 0) {
+    L->link_to(link);
   }
 }
 
@@ -446,13 +614,8 @@
 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
 // space.  There is no guarantee that the relocated location can be similarly
 // encoded.
-bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
-  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-    return Serializer::enabled();
-  } else if (rmode == RelocInfo::NONE) {
-    return false;
-  }
-  return true;
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+  return rmode != RelocInfo::NONE;
 }
 
 
@@ -470,14 +633,28 @@
 
 
 void Assembler::GenInstrRegister(Opcode opcode,
+                                 Register rs,
+                                 Register rt,
+                                 uint16_t msb,
+                                 uint16_t lsb,
+                                 SecondaryField func) {
+  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
+  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+      | (msb << kRdShift) | (lsb << kSaShift) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
                                  SecondaryField fmt,
                                  FPURegister ft,
                                  FPURegister fs,
                                  FPURegister fd,
                                  SecondaryField func) {
   ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
-  Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
-      | (fd.code() << 6) | func;
+  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
+      | (fd.code() << kFdShift) | func;
   emit(instr);
 }
 
@@ -489,8 +666,22 @@
                                  FPURegister fd,
                                  SecondaryField func) {
   ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
   Instr instr = opcode | fmt | (rt.code() << kRtShift)
-      | (fs.code() << kFsShift) | (fd.code() << 6) | func;
+      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 SecondaryField fmt,
+                                 Register rt,
+                                 FPUControlRegister fs,
+                                 SecondaryField func) {
+  ASSERT(fs.is_valid() && rt.is_valid());
+  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  Instr instr =
+      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
   emit(instr);
 }
 
@@ -523,6 +714,7 @@
                                   FPURegister ft,
                                   int32_t j) {
   ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
   Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
       | (j & kImm16Mask);
   emit(instr);
@@ -532,26 +724,122 @@
 // Registers are in the order of the instruction encoding, from left to right.
 void Assembler::GenInstrJump(Opcode opcode,
                               uint32_t address) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
   ASSERT(is_uint26(address));
   Instr instr = opcode | address;
   emit(instr);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+// Returns the next free label entry from the next trampoline pool.
+int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) {
+  int trampoline_count = trampolines_.length();
+  int32_t label_entry = 0;
+  ASSERT(trampoline_count > 0);
+
+  if (next_pool) {
+    for (int i = 0; i < trampoline_count; i++) {
+      if (trampolines_[i].start() > pos) {
+        label_entry = trampolines_[i].take_label();
+        break;
+      }
+    }
+  } else {  // Caller needs a label entry from the previous pool.
+    for (int i = trampoline_count - 1; i >= 0; i--) {
+      if (trampolines_[i].end() < pos) {
+        label_entry = trampolines_[i].take_label();
+        break;
+      }
+    }
+  }
+  return label_entry;
+}
+
+
+// Returns the next free trampoline entry from the next trampoline pool.
+int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
+  int trampoline_count = trampolines_.length();
+  int32_t trampoline_entry = 0;
+  ASSERT(trampoline_count > 0);
+
+  if (next_pool) {
+    for (int i = 0; i < trampoline_count; i++) {
+      if (trampolines_[i].start() > pos) {
+        trampoline_entry = trampolines_[i].take_slot();
+        break;
+      }
+    }
+  } else {  // Caller needs a trampoline entry from the previous pool.
+    for (int i = trampoline_count - 1; i >= 0; i--) {
+      if (trampolines_[i].end() < pos) {
+        trampoline_entry = trampolines_[i].take_slot();
+        break;
+      }
+    }
+  }
+  return trampoline_entry;
 }
 
 
 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
   int32_t target_pos;
+  int32_t pc_offset_v = pc_offset();
+
   if (L->is_bound()) {
     target_pos = L->pos();
+    int32_t dist = pc_offset_v - target_pos;
+    if (dist > kMaxBranchOffset) {
+      do {
+        int32_t trampoline_pos = get_trampoline_entry(target_pos);
+        ASSERT((trampoline_pos - target_pos) > 0);
+        ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
+        target_at_put(trampoline_pos, target_pos);
+        target_pos = trampoline_pos;
+        dist = pc_offset_v - target_pos;
+      } while (dist > kMaxBranchOffset);
+    } else if (dist < -kMaxBranchOffset) {
+      do {
+        int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
+        ASSERT((target_pos - trampoline_pos) > 0);
+        ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
+        target_at_put(trampoline_pos, target_pos);
+        target_pos = trampoline_pos;
+        dist = pc_offset_v - target_pos;
+      } while (dist < -kMaxBranchOffset);
+    }
   } else {
     if (L->is_linked()) {
-      target_pos = L->pos();  // L's link
+      target_pos = L->pos();  // L's link.
+      int32_t dist = pc_offset_v - target_pos;
+      if (dist > kMaxBranchOffset) {
+        do {
+          int32_t label_pos = get_label_entry(target_pos);
+          ASSERT((label_pos - target_pos) < kMaxBranchOffset);
+          label_at_put(L, label_pos);
+          target_pos = label_pos;
+          dist = pc_offset_v - target_pos;
+        } while (dist > kMaxBranchOffset);
+      } else if (dist < -kMaxBranchOffset) {
+        do {
+          int32_t label_pos = get_label_entry(target_pos, false);
+          ASSERT((label_pos - target_pos) > -kMaxBranchOffset);
+          label_at_put(L, label_pos);
+          target_pos = label_pos;
+          dist = pc_offset_v - target_pos;
+        } while (dist < -kMaxBranchOffset);
+      }
+      L->link_to(pc_offset());
     } else {
-      target_pos = kEndOfChain;
+      L->link_to(pc_offset());
+      return kEndOfChain;
     }
-    L->link_to(pc_offset());
   }
 
   int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+  ASSERT((offset & 3) == 0);
+  ASSERT(is_int16(offset >> 2));
+
   return offset;
 }
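Note: the kBranchPCOffset term accounts for the delay slot; a worked example of the final encoding step (sketch):

    int32_t EncodeBranchImm16(int32_t branch_pc, int32_t target_pos) {
      const int kBranchPCOffset = 4;  // Branches are relative to pc + 4.
      int32_t offset = target_pos - (branch_pc + kBranchPCOffset);
      return offset >> 2;             // Word offset stored in the imm16 field.
    }
    // EncodeBranchImm16(0, 12) == 2: a branch at byte 0 targeting byte 12.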
 
@@ -560,14 +848,20 @@
   int target_pos;
   if (L->is_bound()) {
     target_pos = L->pos();
+    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
   } else {
     if (L->is_linked()) {
-      target_pos = L->pos();  // L's link
+      target_pos = L->pos();  // L's link.
+      int32_t imm18 = target_pos - at_offset;
+      ASSERT((imm18 & 3) == 0);
+      int32_t imm16 = imm18 >> 2;
+      ASSERT(is_int16(imm16));
+      instr_at_put(at_offset, (imm16 & kImm16Mask));
     } else {
       target_pos = kEndOfChain;
+      instr_at_put(at_offset, 0);
     }
     L->link_to(at_offset);
-    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
   }
 }
 
@@ -580,47 +874,66 @@
 
 
 void Assembler::bal(int16_t offset) {
+  positions_recorder()->WriteRecordedPositions();
   bgezal(zero_reg, offset);
 }
 
 
 void Assembler::beq(Register rs, Register rt, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
   GenInstrImmediate(BEQ, rs, rt, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
 void Assembler::bgez(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
   GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
 void Assembler::bgezal(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
 void Assembler::bgtz(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
   GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
 void Assembler::blez(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
   GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
 void Assembler::bltz(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
   GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
 void Assembler::bltzal(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
 void Assembler::bne(Register rs, Register rt, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
   GenInstrImmediate(BNE, rs, rt, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
@@ -631,18 +944,27 @@
 
 
 void Assembler::jr(Register rs) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (rs.is(ra)) {
+    positions_recorder()->WriteRecordedPositions();
+  }
   GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
 void Assembler::jal(int32_t target) {
+  positions_recorder()->WriteRecordedPositions();
   ASSERT(is_uint28(target) && ((target & 3) == 0));
   GenInstrJump(JAL, target >> 2);
 }
 
 
 void Assembler::jalr(Register rs, Register rd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  positions_recorder()->WriteRecordedPositions();
   GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
@@ -650,28 +972,164 @@
 
 // Arithmetic.
 
-void Assembler::add(Register rd, Register rs, Register rt) {
-  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
-}
-
-
 void Assembler::addu(Register rd, Register rs, Register rt) {
   GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
 }
 
 
-void Assembler::addi(Register rd, Register rs, int32_t j) {
-  GenInstrImmediate(ADDI, rs, rd, j);
-}
-
-
 void Assembler::addiu(Register rd, Register rs, int32_t j) {
   GenInstrImmediate(ADDIU, rs, rd, j);
-}
 
+  // Eliminate pattern: push(r), pop().
+  //   addiu(sp, sp, Operand(-kPointerSize));
+  //   sw(src, MemOperand(sp, 0));
+  //   addiu(sp, sp, Operand(kPointerSize));
+  // All three instructions can be eliminated.
+  if (can_peephole_optimize(3) &&
+      // Pattern.
+      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
+      (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern &&
+      (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) {
+    pc_ -= 3 * kInstrSize;
+    if (FLAG_print_peephole_optimization) {
+      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
+    }
+  }
 
-void Assembler::sub(Register rd, Register rs, Register rt) {
-  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
+  // Eliminate pattern: push(ry), pop(rx).
+  //   addiu(sp, sp, -kPointerSize);
+  //   sw(ry, MemOperand(sp, 0));
+  //   lw(rx, MemOperand(sp, 0));
+  //   addiu(sp, sp, kPointerSize);
+  // All four instructions can be eliminated if ry == rx.
+  // If ry != rx, a register copy from ry to rx is inserted
+  // after eliminating the push and the pop instructions.
+  if (can_peephole_optimize(4)) {
+    Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize);
+    Instr push_instr = instr_at(pc_ - 3 * kInstrSize);
+    Instr pop_instr = instr_at(pc_ - 2 * kInstrSize);
+    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
+
+    if (IsPush(push_instr) &&
+        IsPop(pop_instr) && pre_push_sp_set == kPushInstruction &&
+        post_pop_sp_set == kPopInstruction) {
+      if ((pop_instr & kRtMask) != (push_instr & kRtMask)) {
+        // For consecutive push and pop on different registers,
+        // we delete both the push & pop and insert a register move.
+        // push ry, pop rx --> mov rx, ry.
+        Register reg_pushed, reg_popped;
+        reg_pushed = GetRt(push_instr);
+        reg_popped = GetRt(pop_instr);
+        pc_ -= 4 * kInstrSize;
+        // Insert a mov instruction, which is better than a pair of push & pop.
+        or_(reg_popped, reg_pushed, zero_reg);
+        if (FLAG_print_peephole_optimization) {
+          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
+                 pc_offset());
+        }
+      } else {
+        // For consecutive push and pop on the same register,
+        // both the push and the pop can be deleted.
+        pc_ -= 4 * kInstrSize;
+        if (FLAG_print_peephole_optimization) {
+          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+        }
+      }
+    }
+  }
+
+  if (can_peephole_optimize(5)) {
+    Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize);
+    Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize);
+    Instr lw_instr = instr_at(pc_ - 3 * kInstrSize);
+    Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize);
+    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
+
+    if (IsPush(mem_write_instr) &&
+        pre_push_sp_set == kPushInstruction &&
+        IsPop(mem_read_instr) &&
+        post_pop_sp_set == kPopInstruction) {
+      if (IsLwRegFpOffset(lw_instr) ||
+          IsLwRegFpNegOffset(lw_instr)) {
+        if ((mem_write_instr & kRtMask) ==
+              (mem_read_instr & kRtMask)) {
+          // Pattern: push & pop from/to same register,
+          // with a fp+offset lw in between.
+          //
+          // The following:
+          // addiu sp, sp, -4
+          // sw rx, [sp, #0]
+          // lw rz, [fp, #-24]
+          // lw rx, [sp, 0]
+          // addiu sp, sp, 4
+          //
+          // Becomes:
+          // if (rx == rz)
+          //   delete all
+          // else
+          //   lw rz, [fp, #-24]
+
+          if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) {
+            pc_ -= 5 * kInstrSize;
+          } else {
+            pc_ -= 5 * kInstrSize;
+            // Reinsert back the lw rz.
+            emit(lw_instr);
+          }
+          if (FLAG_print_peephole_optimization) {
+            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
+          }
+        } else {
+          // Pattern: push & pop from/to different registers
+          // with a fp + offset lw in between.
+          //
+          // The following:
+          // addiu sp, sp, -4
+          // sw rx, [sp, 0]
+          // lw rz, [fp, #-24]
+          // lw ry, [sp, 0]
+          // addiu sp, sp, 4
+          //
+          // Becomes:
+          // if (ry == rz)
+          //   mov ry, rx;
+          // else if (rx != rz)
+          //   lw rz, [fp, #-24]
+          //   mov ry, rx
+          // else if ((ry != rz) || (rx == rz))
+          //   mov ry, rx
+          //   lw rz, [fp, #-24]
+
+          Register reg_pushed, reg_popped;
+          if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
+            reg_pushed = GetRt(mem_write_instr);
+            reg_popped = GetRt(mem_read_instr);
+            pc_ -= 5 * kInstrSize;
+            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
+          } else if ((mem_write_instr & kRtMask) !=
+                     (lw_instr & kRtMask)) {
+            reg_pushed = GetRt(mem_write_instr);
+            reg_popped = GetRt(mem_read_instr);
+            pc_ -= 5 * kInstrSize;
+            emit(lw_instr);
+            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
+          } else if (((mem_read_instr & kRtMask) !=
+                      (lw_instr & kRtMask)) ||
+                     ((mem_write_instr & kRtMask) ==
+                      (lw_instr & kRtMask))) {
+            reg_pushed = GetRt(mem_write_instr);
+            reg_popped = GetRt(mem_read_instr);
+            pc_ -= 5 * kInstrSize;
+            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
+            emit(lw_instr);
+          }
+          if (FLAG_print_peephole_optimization) {
+            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
+          }
+        }
+      }
+    }
+  }
 }
 
 
@@ -743,7 +1201,15 @@
 
 
 // Shifts.
-void Assembler::sll(Register rd, Register rt, uint16_t sa) {
+void Assembler::sll(Register rd,
+                    Register rt,
+                    uint16_t sa,
+                    bool coming_from_nop) {
+  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
+  // generated using the sll instruction. They must be generated using
+  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
+  // instructions.
+  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
 }
 
@@ -773,30 +1239,199 @@
 }
 
 
+void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
+  // Should be called via MacroAssembler::Ror.
+  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+  ASSERT(mips32r2);
+  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
+  emit(instr);
+}
+
+
+void Assembler::rotrv(Register rd, Register rt, Register rs) {
+  // Should be called via MacroAssembler::Ror.
+  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
+  ASSERT(mips32r2);
+  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+     | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
+  emit(instr);
+}
+
+
 //------------Memory-instructions-------------
 
+// Helper for base-reg + offset, when offset is larger than int16.
+void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
+  ASSERT(!src.rm().is(at));
+  lui(at, src.offset_ >> kLuiShift);
+  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
+  addu(at, at, src.rm());  // Add base register.
+}
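Note: a worked example of what the helper leaves in `at` (sketch; exact for any 32-bit offset, since lui/ori rebuild the full word):

    #include <stdint.h>

    uint32_t ComposeAt(uint32_t base, uint32_t offset) {
      uint32_t at = (offset >> 16) << 16;  // lui  at, offset >> kLuiShift
      at |= offset & 0xffffu;              // ori  at, at, offset & kImm16Mask
      return at + base;                    // addu at, at, base
    }
    // ComposeAt(base, 0x12348) == base + 0x12348; the memory access then
    // uses MemOperand(at, 0).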
+
+
 void Assembler::lb(Register rd, const MemOperand& rs) {
-  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
+  }
 }
 
 
 void Assembler::lbu(Register rd, const MemOperand& rs) {
-  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::lh(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::lhu(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
+  }
 }
 
 
 void Assembler::lw(Register rd, const MemOperand& rs) {
-  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
+  }
+
+  if (can_peephole_optimize(2)) {
+    Instr sw_instr = instr_at(pc_ - 2 * kInstrSize);
+    Instr lw_instr = instr_at(pc_ - 1 * kInstrSize);
+
+    if ((IsSwRegFpOffset(sw_instr) &&
+         IsLwRegFpOffset(lw_instr)) ||
+        (IsSwRegFpNegOffset(sw_instr) &&
+         IsLwRegFpNegOffset(lw_instr))) {
+      if ((lw_instr & kLwSwInstrArgumentMask) ==
+            (sw_instr & kLwSwInstrArgumentMask)) {
+        // Pattern: sw/lw with the same fp+offset, same register.
+        //
+        // The following:
+        // sw rx, [fp, #-12]
+        // lw rx, [fp, #-12]
+        //
+        // Becomes:
+        // sw rx, [fp, #-12]
+
+        pc_ -= 1 * kInstrSize;
+        if (FLAG_print_peephole_optimization) {
+          PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset());
+        }
+      } else if ((lw_instr & kLwSwOffsetMask) ==
+                 (sw_instr & kLwSwOffsetMask)) {
+        // Pattern: sw/lw with the same fp+offset, different register.
+        //
+        // The following:
+        // sw rx, [fp, #-12]
+        // lw ry, [fp, #-12]
+        //
+        // Becomes:
+        // sw rx, [fp, #-12]
+        // mov ry, rx
+
+        Register reg_stored, reg_loaded;
+        reg_stored = GetRt(sw_instr);
+        reg_loaded = GetRt(lw_instr);
+        pc_ -= 1 * kInstrSize;
+        // Insert a mov instruction, which is better than lw.
+        or_(reg_loaded, reg_stored, zero_reg);  // Move instruction.
+        if (FLAG_print_peephole_optimization) {
+          PrintF("%x sw/lw (fp + same offset), diff reg \n", pc_offset());
+        }
+      }
+    }
+  }
+}
+
+
+void Assembler::lwl(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lwr(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
 }
 
 
 void Assembler::sb(Register rd, const MemOperand& rs) {
-  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to store.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::sh(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to store.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
+  }
 }
 
 
 void Assembler::sw(Register rd, const MemOperand& rs) {
-  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to store.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
+  }
+
+  // Eliminate pattern: pop(), push(r).
+  //     addiu sp, sp, Operand(kPointerSize);
+  //     addiu sp, sp, Operand(-kPointerSize);
+  // ->  sw r, MemOperand(sp, 0);
+  if (can_peephole_optimize(3) &&
+     // Pattern.
+     instr_at(pc_ - 1 * kInstrSize) ==
+       (kPushRegPattern | (rd.code() << kRtShift)) &&
+     instr_at(pc_ - 2 * kInstrSize) == kPushInstruction &&
+     instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) {
+    pc_ -= 3 * kInstrSize;
+    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+    if (FLAG_print_peephole_optimization) {
+      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
+    }
+  }
+}
+
+
+void Assembler::swl(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::swr(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
 }
 
 
@@ -841,7 +1476,8 @@
 
 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
   ASSERT(is_uint10(code));
-  Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
+  Instr instr =
+      SPECIAL | TLTU | rs.code() << kRsShift
       | rt.code() << kRtShift | code << 6;
   emit(instr);
 }
@@ -896,6 +1532,54 @@
 }
 
 
+// Conditional move.
+void Assembler::movz(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
+}
+
+
+void Assembler::movn(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
+}
+
+
+void Assembler::movt(Register rd, Register rs, uint16_t cc) {
+  Register rt;
+  rt.code_ = (cc & 0x0003) << 2 | 1;
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+void Assembler::movf(Register rd, Register rs, uint16_t cc) {
+  Register rt;
+  rt.code_ = (cc & 0x0003) << 2 | 0;
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+// Bit twiddling.
+void Assembler::clz(Register rd, Register rs) {
+  // The clz instr requires the same GPR number in the 'rd' and 'rt' fields.
+  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+}
+
+
+void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+  // Should be called via MacroAssembler::Ins.
+  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
+  ASSERT(mips32r2);
+  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
+}
+
+
+void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+  // Should be called via MacroAssembler::Ext.
+  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
+  ASSERT(mips32r2);
+  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
+}
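Note: in register terms, ext_ extracts the size-bit field starting at bit pos; a reference version (sketch, valid for 1 <= size <= 31):

    #include <stdint.h>

    uint32_t ExtBits(uint32_t rs, unsigned pos, unsigned size) {
      return (rs >> pos) & ((1u << size) - 1u);  // rt = rs[pos+size-1 : pos].
    }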
+
+
 //--------Coprocessor-instructions----------------
 
 // Load, store, move.
@@ -905,7 +1589,12 @@
 
 
 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
-  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+  // Workaround for the non-8-byte alignment of HeapNumber: convert the
+  // 64-bit load into two 32-bit loads.
+  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+  FPURegister nextfpreg;
+  nextfpreg.setcode(fd.code() + 1);
+  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
 }
 
 
@@ -915,27 +1604,74 @@
 
 
 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
-  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
+  // Workaround for the non-8-byte alignment of HeapNumber: convert the
+  // 64-bit store into two 32-bit stores.
+  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+  FPURegister nextfpreg;
+  nextfpreg.setcode(fd.code() + 1);
+  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
 }
 
 
-void Assembler::mtc1(FPURegister fs, Register rt) {
+void Assembler::mtc1(Register rt, FPURegister fs) {
   GenInstrRegister(COP1, MTC1, rt, fs, f0);
 }
 
 
-void Assembler::mthc1(FPURegister fs, Register rt) {
-  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
-}
-
-
-void Assembler::mfc1(FPURegister fs, Register rt) {
+void Assembler::mfc1(Register rt, FPURegister fs) {
   GenInstrRegister(COP1, MFC1, rt, fs, f0);
 }
 
 
-void Assembler::mfhc1(FPURegister fs, Register rt) {
-  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+void Assembler::ctc1(Register rt, FPUControlRegister fs) {
+  GenInstrRegister(COP1, CTC1, rt, fs);
+}
+
+
+void Assembler::cfc1(Register rt, FPUControlRegister fs) {
+  GenInstrRegister(COP1, CFC1, rt, fs);
+}
+
+
+// Arithmetic.
+
+void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
+}
+
+
+void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
+}
+
+
+void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
+}
+
+
+void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
+}
+
+
+void Assembler::abs_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
+}
+
+
+void Assembler::mov_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
+}
+
+
+void Assembler::neg_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
+}
+
+
+void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
 }
 
 
@@ -951,22 +1687,107 @@
 }
 
 
+void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
+}
+
+
+void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
+}
+
+
+void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
+}
+
+
+void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
+}
+
+
+void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
+}
+
+
+void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
+}
+
+
+void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
+}
+
+
+void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
+}
+
+
 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
 }
 
 
 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
 }
 
 
+void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
+  ASSERT(mips32r2);
+  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
+}
+
+
+void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
+  ASSERT(mips32r2);
+  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
+}
+
+
+void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
+}
+
+
+void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
+}
+
+
+void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
+}
+
+
+void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
+}
+
+
+void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
+}
+
+
+void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
+}
+
+
 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
   GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
 }
 
 
 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
 }
 
@@ -982,6 +1803,7 @@
 
 
 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
 }
 
@@ -993,7 +1815,8 @@
 
 // Conditions.
 void Assembler::c(FPUCondition cond, SecondaryField fmt,
-    FPURegister ft, FPURegister fs, uint16_t cc) {
+    FPURegister fs, FPURegister ft, uint16_t cc) {
+  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   ASSERT((fmt & ~(31 << kRsShift)) == 0);
   Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1002,7 +1825,18 @@
 }
 
 
+void Assembler::fcmp(FPURegister src1, const double src2,
+      FPUCondition cond) {
+  ASSERT(isolate()->cpu_features()->IsSupported(FPU));
+  ASSERT(src2 == 0.0);
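+  // Materialize 0.0 in f14: move zero_reg into it, then convert the word to
+  // a double before comparing src1 against it.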
+  mtc1(zero_reg, f14);
+  cvt_d_w(f14, f14);
+  c(cond, D, src1, f14, 0);
+}
+
+
 void Assembler::bc1f(int16_t offset, uint16_t cc) {
+  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
   emit(instr);
@@ -1010,6 +1844,7 @@
 
 
 void Assembler::bc1t(int16_t offset, uint16_t cc) {
+  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
   emit(instr);
@@ -1018,66 +1853,32 @@
 
 // Debugging.
 void Assembler::RecordJSReturn() {
-  WriteRecordedPositions();
+  positions_recorder()->WriteRecordedPositions();
   CheckBuffer();
   RecordRelocInfo(RelocInfo::JS_RETURN);
 }
 
 
+void Assembler::RecordDebugBreakSlot() {
+  positions_recorder()->WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
 void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
+  if (FLAG_code_comments) {
     CheckBuffer();
     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   }
 }
 
 
-void Assembler::RecordPosition(int pos) {
-  if (pos == RelocInfo::kNoPosition) return;
-  ASSERT(pos >= 0);
-  current_position_ = pos;
-}
-
-
-void Assembler::RecordStatementPosition(int pos) {
-  if (pos == RelocInfo::kNoPosition) return;
-  ASSERT(pos >= 0);
-  current_statement_position_ = pos;
-}
-
-
-bool Assembler::WriteRecordedPositions() {
-  bool written = false;
-
-  // Write the statement position if it is different from what was written last
-  // time.
-  if (current_statement_position_ != written_statement_position_) {
-    CheckBuffer();
-    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
-    written_statement_position_ = current_statement_position_;
-    written = true;
-  }
-
-  // Write the position if it is different from what was written last time and
-  // also different from the written statement position.
-  if (current_position_ != written_position_ &&
-      current_position_ != written_statement_position_) {
-    CheckBuffer();
-    RecordRelocInfo(RelocInfo::POSITION, current_position_);
-    written_position_ = current_position_;
-    written = true;
-  }
-
-  // Return whether something was written.
-  return written;
-}
-
-
 void Assembler::GrowBuffer() {
   if (!own_buffer_) FATAL("external code buffer is too small");
 
   // Compute new buffer size.
-  CodeDesc desc;  // the new buffer
+  CodeDesc desc;  // The new buffer.
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
   } else if (buffer_size_ < 1*MB) {
@@ -1085,7 +1886,7 @@
   } else {
     desc.buffer_size = buffer_size_ + 1*MB;
   }
-  CHECK_GT(desc.buffer_size, 0);  // no overflow
+  CHECK_GT(desc.buffer_size, 0);  // No overflow.
 
   // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
@@ -1108,7 +1909,6 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-
   // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
   // shift by pc_delta. But on MIPS the target address is directly loaded, so
   // we do not need to relocate here.
@@ -1117,11 +1917,26 @@
 }
 
 
+void Assembler::db(uint8_t data) {
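+  // Raw data is written directly into the instruction stream; no relocation
+  // information is recorded for it.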
+  CheckBuffer();
+  *reinterpret_cast<uint8_t*>(pc_) = data;
+  pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint32_t*>(pc_) = data;
+  pc_ += sizeof(uint32_t);
+}
+
+
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
-  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
+  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
-    ASSERT(RelocInfo::IsJSReturn(rmode)
+    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+           || RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode));
     // These modes do not need an entry in the constant pool.
@@ -1133,12 +1948,72 @@
         !FLAG_debug_code) {
       return;
     }
-    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
     reloc_info_writer.Write(&rinfo);
   }
 }
 
 
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+void Assembler::CheckTrampolinePool(bool force_emit) {
+  // Calculate the offset of the next check.
+  next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+  int dist = pc_offset() - last_trampoline_pool_end_;
+
+  if (dist <= kMaxDistBetweenPools && !force_emit) {
+    return;
+  }
+
+  // Some small sequences of instructions must not be broken up by the
+  // insertion of a trampoline pool; such sequences are protected by setting
+  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+  // which are both checked here. Also, recursive calls to CheckTrampolinePool
+  // are blocked by trampoline_pool_blocked_nesting_.
+  if ((trampoline_pool_blocked_nesting_ > 0) ||
+      (pc_offset() < no_trampoline_pool_before_)) {
+    // Emission is currently blocked; make sure we try again as soon as
+    // possible.
+    if (trampoline_pool_blocked_nesting_ > 0) {
+      next_buffer_check_ = pc_offset() + kInstrSize;
+    } else {
+      next_buffer_check_ = no_trampoline_pool_before_;
+    }
+    return;
+  }
+
+  // First we emit jump (2 instructions), then we emit trampoline pool.
+  { BlockTrampolinePoolScope block_trampoline_pool(this);
+    Label after_pool;
+    b(&after_pool);
+    nop();
+
+    int pool_start = pc_offset();
+    for (int i = 0; i < kSlotsPerTrampoline; i++) {
+      b(&after_pool);
+      nop();
+    }
+    for (int i = 0; i < kLabelsPerTrampoline; i++) {
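+      // Reserve one word for each label entry.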
+      emit(0);
+    }
+    last_trampoline_pool_end_ = pc_offset() - kInstrSize;
+    bind(&after_pool);
+    trampolines_.Add(Trampoline(pool_start,
+                                kSlotsPerTrampoline,
+                                kLabelsPerTrampoline));
+
+    // Since a trampoline pool was just emitted,
+    // move the check offset forward by the standard interval.
+    next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools;
+  }
+  return;
+}
+
+
 Address Assembler::target_address_at(Address pc) {
   Instr instr1 = instr_at(pc);
   Instr instr2 = instr_at(pc + kInstrSize);
@@ -1157,7 +2032,7 @@
       return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
     }
   } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
-    // 32 bits value.
+    // 32 bit value.
     return reinterpret_cast<Address>(
         (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
   }
@@ -1176,38 +2051,37 @@
 #ifdef DEBUG
   Instr instr1 = instr_at(pc);
 
-  // Check we have indeed the result from a li with MustUseAt true.
+  // Check that we indeed have the result from a li with MustUseReg true.
   CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
         ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
                            (instr2 & kOpcodeMask)== ORI ||
                            (instr2 & kOpcodeMask)== LUI)));
 #endif
 
-
   uint32_t rt_code = (instr2 & kRtFieldMask);
   uint32_t* p = reinterpret_cast<uint32_t*>(pc);
   uint32_t itarget = reinterpret_cast<uint32_t>(target);
 
   if (is_int16(itarget)) {
-    // nop
-    // addiu rt zero_reg j
+    // nop.
+    // addiu rt zero_reg j.
     *p = nopInstr;
-    *(p+1) = ADDIU | rt_code | (itarget & LOMask);
-  } else if (!(itarget & HIMask)) {
-    // nop
-    // ori rt zero_reg j
+    *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask);
+  } else if (!(itarget & kHiMask)) {
+    // nop.
+    // ori rt zero_reg j.
     *p = nopInstr;
-    *(p+1) = ORI | rt_code | (itarget & LOMask);
-  } else if (!(itarget & LOMask)) {
-    // nop
-    // lui rt (HIMask & itarget)>>16
+    *(p+1) = ORI | rt_code | (itarget & kImm16Mask);
+  } else if (!(itarget & kImm16Mask)) {
+    // nop.
+    // lui rt (kHiMask & itarget) >> kLuiShift.
     *p = nopInstr;
-    *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
+    *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
   } else {
-    // lui rt (HIMask & itarget)>>16
-    // ori rt rt, (LOMask & itarget)
-    *p = LUI | rt_code | ((itarget & HIMask)>>16);
-    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
+    // lui rt (kHiMask & itarget) >> kLuiShift.
+    // ori rt rt, (kImm16Mask & itarget).
+    *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
   }
 
   CPU::FlushICache(pc, 2 * sizeof(int32_t));
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index a687c2b..5a6e271 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -41,8 +41,6 @@
 #include "constants-mips.h"
 #include "serialize.h"
 
-using namespace assembler::mips;
-
 namespace v8 {
 namespace internal {
 
@@ -73,6 +71,44 @@
 
 // Core register.
 struct Register {
+  static const int kNumRegisters = v8::internal::kNumRegisters;
+  static const int kNumAllocatableRegisters = 14;  // v0 through t7
+
+  static int ToAllocationIndex(Register reg) {
+    return reg.code() - 2;  // zero_reg and 'at' are skipped.
+  }
+
+  static Register FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return from_code(index + 2);  // zero_reg and 'at' are skipped.
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "v0",
+      "v1",
+      "a0",
+      "a1",
+      "a2",
+      "a3",
+      "t0",
+      "t1",
+      "t2",
+      "t3",
+      "t4",
+      "t5",
+      "t6",
+      "t7",
+    };
+    return names[index];
+  }
+
+  static Register from_code(int code) {
+    Register r = { code };
+    return r;
+  }
+
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   int code() const {
@@ -88,40 +124,41 @@
   int code_;
 };
 
-extern const Register no_reg;
+const Register no_reg = { -1 };
 
-extern const Register zero_reg;
-extern const Register at;
-extern const Register v0;
-extern const Register v1;
-extern const Register a0;
-extern const Register a1;
-extern const Register a2;
-extern const Register a3;
-extern const Register t0;
-extern const Register t1;
-extern const Register t2;
-extern const Register t3;
-extern const Register t4;
-extern const Register t5;
-extern const Register t6;
-extern const Register t7;
-extern const Register s0;
-extern const Register s1;
-extern const Register s2;
-extern const Register s3;
-extern const Register s4;
-extern const Register s5;
-extern const Register s6;
-extern const Register s7;
-extern const Register t8;
-extern const Register t9;
-extern const Register k0;
-extern const Register k1;
-extern const Register gp;
-extern const Register sp;
-extern const Register s8_fp;
-extern const Register ra;
+const Register zero_reg = { 0 };
+const Register at = { 1 };
+const Register v0 = { 2 };
+const Register v1 = { 3 };
+const Register a0 = { 4 };
+const Register a1 = { 5 };
+const Register a2 = { 6 };
+const Register a3 = { 7 };
+const Register t0 = { 8 };
+const Register t1 = { 9 };
+const Register t2 = { 10 };
+const Register t3 = { 11 };
+const Register t4 = { 12 };
+const Register t5 = { 13 };
+const Register t6 = { 14 };
+const Register t7 = { 15 };
+const Register s0 = { 16 };
+const Register s1 = { 17 };
+const Register s2 = { 18 };
+const Register s3 = { 19 };
+const Register s4 = { 20 };
+const Register s5 = { 21 };
+const Register s6 = { 22 };
+const Register s7 = { 23 };
+const Register t8 = { 24 };
+const Register t9 = { 25 };
+const Register k0 = { 26 };
+const Register k1 = { 27 };
+const Register gp = { 28 };
+const Register sp = { 29 };
+const Register s8_fp = { 30 };
+const Register ra = { 31 };
+
 
 int ToNumber(Register reg);
 
@@ -129,7 +166,50 @@
 
 // Coprocessor register.
 struct FPURegister {
-  bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister ; }
+  static const int kNumRegisters = v8::internal::kNumFPURegisters;
+  // f0 has been excluded from allocation. This follows ia32, where xmm0 is
+  // excluded.
+  static const int kNumAllocatableRegisters = 15;
+
+  static int ToAllocationIndex(FPURegister reg) {
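+    // Only even-numbered registers are allocatable, so, e.g., f2 maps to
+    // allocation index 0 and f30 to index 14.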
+    ASSERT(reg.code() != 0);
+    ASSERT(reg.code() % 2 == 0);
+    return (reg.code() / 2) - 1;
+  }
+
+  static FPURegister FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return from_code((index + 1) * 2);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "f2",
+      "f4",
+      "f6",
+      "f8",
+      "f10",
+      "f12",
+      "f14",
+      "f16",
+      "f18",
+      "f20",
+      "f22",
+      "f24",
+      "f26",
+      "f28",
+      "f30"
+    };
+    return names[index];
+  }
+
+  static FPURegister from_code(int code) {
+    FPURegister r = { code };
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters; }
   bool is(FPURegister creg) const { return code_ == creg.code_; }
   int code() const {
     ASSERT(is_valid());
@@ -139,84 +219,77 @@
     ASSERT(is_valid());
     return 1 << code_;
   }
-
+  void setcode(int f) {
+    code_ = f;
+    ASSERT(is_valid());
+  }
   // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
-extern const FPURegister no_creg;
+typedef FPURegister DoubleRegister;
 
-extern const FPURegister f0;
-extern const FPURegister f1;
-extern const FPURegister f2;
-extern const FPURegister f3;
-extern const FPURegister f4;
-extern const FPURegister f5;
-extern const FPURegister f6;
-extern const FPURegister f7;
-extern const FPURegister f8;
-extern const FPURegister f9;
-extern const FPURegister f10;
-extern const FPURegister f11;
-extern const FPURegister f12;  // arg
-extern const FPURegister f13;
-extern const FPURegister f14;  // arg
-extern const FPURegister f15;
-extern const FPURegister f16;
-extern const FPURegister f17;
-extern const FPURegister f18;
-extern const FPURegister f19;
-extern const FPURegister f20;
-extern const FPURegister f21;
-extern const FPURegister f22;
-extern const FPURegister f23;
-extern const FPURegister f24;
-extern const FPURegister f25;
-extern const FPURegister f26;
-extern const FPURegister f27;
-extern const FPURegister f28;
-extern const FPURegister f29;
-extern const FPURegister f30;
-extern const FPURegister f31;
+const FPURegister no_creg = { -1 };
 
+const FPURegister f0 = { 0 };  // Return value in hard float mode.
+const FPURegister f1 = { 1 };
+const FPURegister f2 = { 2 };
+const FPURegister f3 = { 3 };
+const FPURegister f4 = { 4 };
+const FPURegister f5 = { 5 };
+const FPURegister f6 = { 6 };
+const FPURegister f7 = { 7 };
+const FPURegister f8 = { 8 };
+const FPURegister f9 = { 9 };
+const FPURegister f10 = { 10 };
+const FPURegister f11 = { 11 };
+const FPURegister f12 = { 12 };  // Arg 0 in hard float mode.
+const FPURegister f13 = { 13 };
+const FPURegister f14 = { 14 };  // Arg 1 in hard float mode.
+const FPURegister f15 = { 15 };
+const FPURegister f16 = { 16 };
+const FPURegister f17 = { 17 };
+const FPURegister f18 = { 18 };
+const FPURegister f19 = { 19 };
+const FPURegister f20 = { 20 };
+const FPURegister f21 = { 21 };
+const FPURegister f22 = { 22 };
+const FPURegister f23 = { 23 };
+const FPURegister f24 = { 24 };
+const FPURegister f25 = { 25 };
+const FPURegister f26 = { 26 };
+const FPURegister f27 = { 27 };
+const FPURegister f28 = { 28 };
+const FPURegister f29 = { 29 };
+const FPURegister f30 = { 30 };
+const FPURegister f31 = { 31 };
 
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
+// FPU (coprocessor 1) control registers.
+// Currently only FCSR (#31) is implemented.
+struct FPUControlRegister {
+  static const int kFCSRRegister = 31;
+  static const int kInvalidFPUControlRegister = -1;
 
-inline Condition ReverseCondition(Condition cc) {
-  switch (cc) {
-    case Uless:
-      return Ugreater;
-    case Ugreater:
-      return Uless;
-    case Ugreater_equal:
-      return Uless_equal;
-    case Uless_equal:
-      return Ugreater_equal;
-    case less:
-      return greater;
-    case greater:
-      return less;
-    case greater_equal:
-      return less_equal;
-    case less_equal:
-      return greater_equal;
-    default:
-      return cc;
-  };
-}
-
-
-enum Hint {
-  no_hint = 0
+  bool is_valid() const { return code_ == kFCSRRegister; }
+  bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+  void setcode(int f) {
+    code_ = f;
+    ASSERT(is_valid());
+  }
+  // Unfortunately we can't make this private in a struct.
+  int code_;
 };
 
-inline Hint NegateHint(Hint hint) {
-  return no_hint;
-}
+const FPUControlRegister no_fpucreg = { -1 };
+const FPUControlRegister FCSR = { kFCSRRegister };
 
 
 // -----------------------------------------------------------------------------
@@ -258,16 +331,75 @@
 class MemOperand : public Operand {
  public:
 
-  explicit MemOperand(Register rn, int16_t offset = 0);
+  explicit MemOperand(Register rn, int32_t offset = 0);
 
  private:
-  int16_t offset_;
+  int32_t offset_;
 
   friend class Assembler;
 };
 
 
-class Assembler : public Malloced {
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures {
+ public:
+  // Detect features of the target CPU. Set safe defaults if the serializer
+  // is enabled (snapshots must be portable).
+  void Probe(bool portable);
+
+  // Check whether a feature is supported by the target CPU.
+  bool IsSupported(CpuFeature f) const {
+    if (f == FPU && !FLAG_enable_fpu) return false;
+    return (supported_ & (1u << f)) != 0;
+  }
+
+  // Check whether a feature is currently enabled.
+  bool IsEnabled(CpuFeature f) const {
+    return (enabled_ & (1u << f)) != 0;
+  }
+
+  // Enable a specified feature within a scope.
+  class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+   public:
+    explicit Scope(CpuFeature f)
+        : cpu_features_(Isolate::Current()->cpu_features()),
+          isolate_(Isolate::Current()) {
+      ASSERT(cpu_features_->IsSupported(f));
+      ASSERT(!Serializer::enabled() ||
+             (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
+      old_enabled_ = cpu_features_->enabled_;
+      cpu_features_->enabled_ |= 1u << f;
+    }
+    ~Scope() {
+      ASSERT_EQ(Isolate::Current(), isolate_);
+      cpu_features_->enabled_ = old_enabled_;
+     }
+   private:
+    unsigned old_enabled_;
+    CpuFeatures* cpu_features_;
+    Isolate* isolate_;
+#else
+   public:
+    explicit Scope(CpuFeature f) {}
+#endif
+  };
+
+ private:
+  CpuFeatures();
+
+  unsigned supported_;
+  unsigned enabled_;
+  unsigned found_by_runtime_probing_;
+
+  friend class Isolate;
+
+  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
+
+
+class Assembler : public AssemblerBase {
  public:
   // Create an assembler. Instructions and relocation information are emitted
   // into a buffer, with the instructions starting from the beginning and the
@@ -285,6 +417,9 @@
   Assembler(void* buffer, int buffer_size);
   ~Assembler();
 
+  // Overrides the default provided by FLAG_debug_code.
+  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
   // GetCode emits any pending (non-emitted) code and fills the descriptor
   // desc. GetCode() is idempotent; it returns the same result if no other
   // Assembler functions are invoked in between GetCode() calls.
@@ -320,12 +455,6 @@
   // The high 8 bits are set to zero.
   void label_at_put(Label* L, int at_offset);
 
-  // Size of an instruction.
-  static const int kInstrSize = sizeof(Instr);
-
-  // Difference between address of current opcode and target address offset.
-  static const int kBranchPCOffset = 4;
-
   // Read/Modify the code target address in the branch/call instruction at pc.
   static Address target_address_at(Address pc);
   static void set_target_address_at(Address pc, Address target);
@@ -344,8 +473,25 @@
     set_target_address_at(instruction_payload, target);
   }
 
-  static const int kCallTargetSize = 3 * kPointerSize;
-  static const int kExternalTargetSize = 3 * kPointerSize;
+  // Size of an instruction.
+  static const int kInstrSize = sizeof(Instr);
+
+  // Difference between address of current opcode and target address offset.
+  static const int kBranchPCOffset = 4;
+
+  // Here we are patching the address in the LUI/ORI instruction pair.
+  // These values are used in the serialization process and must be zero for
+  // the MIPS platform, as Code, Embedded Object or External-reference
+  // pointers are split across two consecutive instructions and don't exist
+  // separately in the code, so the serializer should not step forward in
+  // memory after a target is resolved and written.
+  static const int kCallTargetSize = 0 * kInstrSize;
+  static const int kExternalTargetSize = 0 * kInstrSize;
+
+  // Number of consecutive instructions used to store a 32-bit constant.
+  // Used by RelocInfo::target_address_address() to tell the serializer the
+  // address of the instruction that follows the LUI/ORI instruction pair.
+  static const int kInstructionsFor32BitConstant = 2;
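+  // For example, the 32-bit constant 0x12345678 is materialized as:
+  //   lui  rt, 0x1234       (upper halfword)
+  //   ori  rt, rt, 0x5678   (lower halfword)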
 
   // Distance between the instruction referring to the address of the call
   // target and the return address.
@@ -353,16 +499,53 @@
 
   // Distance between start of patched return sequence and the emitted address
   // to jump to.
-  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+  static const int kPatchReturnSequenceAddressOffset = 0;
 
   // Distance between start of patched debug break slot and the emitted address
   // to jump to.
-  static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
+  static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+
+  // Difference between address of current opcode and value read from pc
+  // register.
+  static const int kPcLoadDelta = 4;
+
+  // Number of instructions used for the JS return sequence. The constant is
+  // used by the debugger to patch the JS return sequence.
+  static const int kJSReturnSequenceInstructions = 7;
+  static const int kDebugBreakSlotInstructions = 4;
+  static const int kDebugBreakSlotLength =
+      kDebugBreakSlotInstructions * kInstrSize;
+
 
   // ---------------------------------------------------------------------------
   // Code generation.
 
-  void nop() { sll(zero_reg, zero_reg, 0); }
+  // Insert the smallest number of nop instructions possible to align the pc
+  // offset to a multiple of m. m must be a power of 2 (>= 4).
+  void Align(int m);
+  // Aligns code to something that's optimal for a jump target for the platform.
+  void CodeTargetAlign();
+
+  // Different nop operations are used by the code generator to detect certain
+  // states of the generated code.
+  enum NopMarkerTypes {
+    NON_MARKING_NOP = 0,
+    DEBUG_BREAK_NOP,
+    // IC markers.
+    PROPERTY_ACCESS_INLINED,
+    PROPERTY_ACCESS_INLINED_CONTEXT,
+    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+    // Helper values.
+    LAST_CODE_MARKER,
+    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+  };
+
+  // type == 0 is the default non-marking type.
+  void nop(unsigned int type = 0) {
+    ASSERT(type < 32);
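+    // The marker is encoded in the shift amount; nop(DEBUG_BREAK_NOP), for
+    // example, emits sll(zero_reg, zero_reg, 1).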
+    sll(zero_reg, zero_reg, type, true);
+  }
 
 
   //------- Branch and jump  instructions --------
@@ -400,9 +583,7 @@
   //-------Data-processing-instructions---------
 
   // Arithmetic.
-  void add(Register rd, Register rs, Register rt);
   void addu(Register rd, Register rs, Register rt);
-  void sub(Register rd, Register rs, Register rt);
   void subu(Register rd, Register rs, Register rt);
   void mult(Register rs, Register rt);
   void multu(Register rs, Register rt);
@@ -410,7 +591,6 @@
   void divu(Register rs, Register rt);
   void mul(Register rd, Register rs, Register rt);
 
-  void addi(Register rd, Register rs, int32_t j);
   void addiu(Register rd, Register rs, int32_t j);
 
   // Logical.
@@ -425,21 +605,33 @@
   void lui(Register rd, int32_t j);
 
   // Shifts.
-  void sll(Register rd, Register rt, uint16_t sa);
+  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nops
+  // and may cause problems in normal code. coming_from_nop makes sure this
+  // doesn't happen.
+  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
   void sllv(Register rd, Register rt, Register rs);
   void srl(Register rd, Register rt, uint16_t sa);
   void srlv(Register rd, Register rt, Register rs);
   void sra(Register rt, Register rd, uint16_t sa);
   void srav(Register rt, Register rd, Register rs);
+  void rotr(Register rd, Register rt, uint16_t sa);
+  void rotrv(Register rd, Register rt, Register rs);
 
 
   //------------Memory-instructions-------------
 
   void lb(Register rd, const MemOperand& rs);
   void lbu(Register rd, const MemOperand& rs);
+  void lh(Register rd, const MemOperand& rs);
+  void lhu(Register rd, const MemOperand& rs);
   void lw(Register rd, const MemOperand& rs);
+  void lwl(Register rd, const MemOperand& rs);
+  void lwr(Register rd, const MemOperand& rs);
   void sb(Register rd, const MemOperand& rs);
+  void sh(Register rd, const MemOperand& rs);
   void sw(Register rd, const MemOperand& rs);
+  void swl(Register rd, const MemOperand& rs);
+  void swr(Register rd, const MemOperand& rs);
 
 
   //-------------Misc-instructions--------------
@@ -463,6 +655,16 @@
   void slti(Register rd, Register rs, int32_t j);
   void sltiu(Register rd, Register rs, int32_t j);
 
+  // Conditional move.
+  void movz(Register rd, Register rs, Register rt);
+  void movn(Register rd, Register rs, Register rt);
+  void movt(Register rd, Register rs, uint16_t cc = 0);
+  void movf(Register rd, Register rs, uint16_t cc = 0);
+
+  // Bit twiddling.
+  void clz(Register rd, Register rs);
+  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
+  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
 
   //--------Coprocessor-instructions----------------
 
@@ -473,19 +675,44 @@
   void swc1(FPURegister fs, const MemOperand& dst);
   void sdc1(FPURegister fs, const MemOperand& dst);
 
-  // When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
-  // executed first, followed by the MTHC1.
-  void mtc1(FPURegister fs, Register rt);
-  void mthc1(FPURegister fs, Register rt);
-  void mfc1(FPURegister fs, Register rt);
-  void mfhc1(FPURegister fs, Register rt);
+  void mtc1(Register rt, FPURegister fs);
+  void mfc1(Register rt, FPURegister fs);
+
+  void ctc1(Register rt, FPUControlRegister fs);
+  void cfc1(Register rt, FPUControlRegister fs);
+
+  // Arithmetic.
+  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void abs_d(FPURegister fd, FPURegister fs);
+  void mov_d(FPURegister fd, FPURegister fs);
+  void neg_d(FPURegister fd, FPURegister fs);
+  void sqrt_d(FPURegister fd, FPURegister fs);
 
   // Conversion.
   void cvt_w_s(FPURegister fd, FPURegister fs);
   void cvt_w_d(FPURegister fd, FPURegister fs);
+  void trunc_w_s(FPURegister fd, FPURegister fs);
+  void trunc_w_d(FPURegister fd, FPURegister fs);
+  void round_w_s(FPURegister fd, FPURegister fs);
+  void round_w_d(FPURegister fd, FPURegister fs);
+  void floor_w_s(FPURegister fd, FPURegister fs);
+  void floor_w_d(FPURegister fd, FPURegister fs);
+  void ceil_w_s(FPURegister fd, FPURegister fs);
+  void ceil_w_d(FPURegister fd, FPURegister fs);
 
   void cvt_l_s(FPURegister fd, FPURegister fs);
   void cvt_l_d(FPURegister fd, FPURegister fs);
+  void trunc_l_s(FPURegister fd, FPURegister fs);
+  void trunc_l_d(FPURegister fd, FPURegister fs);
+  void round_l_s(FPURegister fd, FPURegister fs);
+  void round_l_d(FPURegister fd, FPURegister fs);
+  void floor_l_s(FPURegister fd, FPURegister fs);
+  void floor_l_d(FPURegister fd, FPURegister fs);
+  void ceil_l_s(FPURegister fd, FPURegister fs);
+  void ceil_l_d(FPURegister fd, FPURegister fs);
 
   void cvt_s_w(FPURegister fd, FPURegister fs);
   void cvt_s_l(FPURegister fd, FPURegister fs);
@@ -503,32 +730,60 @@
   void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
   void bc1t(int16_t offset, uint16_t cc = 0);
   void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
-
+  void fcmp(FPURegister src1, const double src2, FPUCondition cond);
 
   // Check the code size generated from label to here.
   int InstructionsGeneratedSince(Label* l) {
     return (pc_offset() - l->pos()) / kInstrSize;
   }
 
+  // Class for scoping postponing the trampoline pool generation.
+  class BlockTrampolinePoolScope {
+   public:
+    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+      assem_->StartBlockTrampolinePool();
+    }
+    ~BlockTrampolinePoolScope() {
+      assem_->EndBlockTrampolinePool();
+    }
+
+   private:
+    Assembler* assem_;
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+  };
+
   // Debugging.
 
   // Mark address of the ExitJSFrame code.
   void RecordJSReturn();
 
+  // Mark address of a debug break slot.
+  void RecordDebugBreakSlot();
+
   // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
+  // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
-  void RecordPosition(int pos);
-  void RecordStatementPosition(int pos);
-  bool WriteRecordedPositions();
+  // Writes a single byte or word of data in the code stream.  Used for
+  // inline tables, e.g., jump-tables.
+  void db(uint8_t data);
+  void dd(uint32_t data);
 
   int32_t pc_offset() const { return pc_ - buffer_; }
-  int32_t current_position() const { return current_position_; }
-  int32_t current_statement_position() const {
-    return current_statement_position_;
+
+  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+  bool can_peephole_optimize(int instructions) {
+    if (!allow_peephole_optimization_) return false;
+    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
+    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
   }
 
+  // Postpone the generation of the trampoline pool for the specified number of
+  // instructions.
+  void BlockTrampolinePoolFor(int instructions);
+
   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
   // an instruction or relocation information.
@@ -537,12 +792,9 @@
   // Get the number of bytes available in the buffer.
   inline int available_space() const { return reloc_info_writer.pos() - pc_; }
 
- protected:
-  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
   // Read/patch instructions.
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
-  void instr_at_put(byte* pc, Instr instr) {
+  static void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
   }
   Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@@ -551,7 +803,34 @@
   }
 
   // Check if an instruction is a branch of some kind.
-  bool is_branch(Instr instr);
+  static bool IsBranch(Instr instr);
+
+  static bool IsNop(Instr instr, unsigned int type);
+  static bool IsPop(Instr instr);
+  static bool IsPush(Instr instr);
+  static bool IsLwRegFpOffset(Instr instr);
+  static bool IsSwRegFpOffset(Instr instr);
+  static bool IsLwRegFpNegOffset(Instr instr);
+  static bool IsSwRegFpNegOffset(Instr instr);
+
+  static Register GetRt(Instr instr);
+
+  static int32_t GetBranchOffset(Instr instr);
+  static bool IsLw(Instr instr);
+  static int16_t GetLwOffset(Instr instr);
+  static Instr SetLwOffset(Instr instr, int16_t offset);
+
+  static bool IsSw(Instr instr);
+  static Instr SetSwOffset(Instr instr, int16_t offset);
+  static bool IsAddImmediate(Instr instr);
+  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+
+  void CheckTrampolinePool(bool force_emit = false);
+
+ protected:
+  bool emit_debug_code() const { return emit_debug_code_; }
+
+  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
 
   // Decode branch instruction at pos and return branch target pos.
   int target_at(int32_t pos);
@@ -560,11 +839,28 @@
   void target_at_put(int32_t pos, int32_t target_pos);
 
   // Say if we need to relocate with this mode.
-  bool MustUseAt(RelocInfo::Mode rmode);
+  bool MustUseReg(RelocInfo::Mode rmode);
 
   // Record reloc info for current pc_.
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
 
+  // Block the emission of the trampoline pool before pc_offset.
+  void BlockTrampolinePoolBefore(int pc_offset) {
+    if (no_trampoline_pool_before_ < pc_offset)
+      no_trampoline_pool_before_ = pc_offset;
+  }
+
+  void StartBlockTrampolinePool() {
+    trampoline_pool_blocked_nesting_++;
+  }
+  void EndBlockTrampolinePool() {
+    trampoline_pool_blocked_nesting_--;
+  }
+
+  bool is_trampoline_pool_blocked() const {
+    return trampoline_pool_blocked_nesting_ > 0;
+  }
+
  private:
   // Code buffer:
   // The buffer into which code and relocation info are generated.
@@ -585,6 +881,22 @@
   static const int kGap = 32;
   byte* pc_;  // The program counter - moves forward.
 
+
+  // Repeated checking whether the trampoline pool should be emitted is rather
+  // expensive. By default we only check again once a number of instructions
+  // has been generated.
+  static const int kCheckConstIntervalInst = 32;
+  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+  int next_buffer_check_;  // pc offset of next buffer check.
+
+  // Emission of the trampoline pool may be blocked in some code sequences.
+  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
+  int no_trampoline_pool_before_;  // Block emission before this pc offset.
+
+  // Keep track of the last emitted pool to guarantee a maximal distance.
+  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.
+
   // Relocation information generation.
   // Each relocation is encoded as a variable size value.
   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -593,16 +905,11 @@
   // The bound position, before this we cannot do instruction elimination.
   int last_bound_pos_;
 
-  // Source position information.
-  int current_position_;
-  int current_statement_position_;
-  int written_position_;
-  int written_statement_position_;
-
   // Code emission.
   inline void CheckBuffer();
   void GrowBuffer();
   inline void emit(Instr x);
+  inline void CheckTrampolinePoolQuick();
 
   // Instruction generation.
   // We have 3 different kind of encoding layout on MIPS.
@@ -620,6 +927,13 @@
                         SecondaryField func = NULLSF);
 
   void GenInstrRegister(Opcode opcode,
+                        Register rs,
+                        Register rt,
+                        uint16_t msb,
+                        uint16_t lsb,
+                        SecondaryField func);
+
+  void GenInstrRegister(Opcode opcode,
                         SecondaryField fmt,
                         FPURegister ft,
                         FPURegister fs,
@@ -633,6 +947,12 @@
                         FPURegister fd,
                         SecondaryField func = NULLSF);
 
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        Register rt,
+                        FPUControlRegister fs,
+                        SecondaryField func = NULLSF);
+
 
   void GenInstrImmediate(Opcode opcode,
                          Register rs,
@@ -651,6 +971,8 @@
   void GenInstrJump(Opcode opcode,
                      uint32_t address);
 
+  // Helpers.
+  void LoadRegPlusOffsetToAt(const MemOperand& src);
 
   // Labels.
   void print(Label* L);
@@ -658,8 +980,85 @@
   void link_to(Label* L, Label* appendix);
   void next(Label* L);
 
+  // One trampoline consists of:
+  // - space for trampoline slots,
+  // - space for labels.
+  //
+  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
+  // Space for trampoline slots precedes space for labels. Each label is of
+  // one instruction size, so the total amount for labels is equal to
+  // label_count * kInstrSize.
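+  //
+  // With the defaults below (kSlotsPerTrampoline = 2304 and
+  // kLabelsPerTrampoline = 8), one pool occupies
+  // (2 * 2304 + 8) * kInstrSize bytes.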
+  class Trampoline {
+   public:
+    Trampoline(int start, int slot_count, int label_count) {
+      start_ = start;
+      next_slot_ = start;
+      free_slot_count_ = slot_count;
+      next_label_ = start + slot_count * 2 * kInstrSize;
+      free_label_count_ = label_count;
+      end_ = next_label_ + (label_count - 1) * kInstrSize;
+    }
+    int start() {
+      return start_;
+    }
+    int end() {
+      return end_;
+    }
+    int take_slot() {
+      int trampoline_slot = next_slot_;
+      ASSERT(free_slot_count_ > 0);
+      free_slot_count_--;
+      next_slot_ += 2 * kInstrSize;
+      return trampoline_slot;
+    }
+    int take_label() {
+      int label_pos = next_label_;
+      ASSERT(free_label_count_ > 0);
+      free_label_count_--;
+      next_label_ += kInstrSize;
+      return label_pos;
+    }
+   private:
+    int start_;
+    int end_;
+    int next_slot_;
+    int free_slot_count_;
+    int next_label_;
+    int free_label_count_;
+  };
+
+  int32_t get_label_entry(int32_t pos, bool next_pool = true);
+  int32_t get_trampoline_entry(int32_t pos, bool next_pool = true);
+
+  static const int kSlotsPerTrampoline = 2304;
+  static const int kLabelsPerTrampoline = 8;
+  static const int kTrampolineInst =
+      2 * kSlotsPerTrampoline + kLabelsPerTrampoline;
+  static const int kTrampolineSize = kTrampolineInst * kInstrSize;
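+  // Branches take a signed 16-bit instruction offset, which is a signed
+  // 18-bit byte offset.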
+  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+  static const int kMaxDistBetweenPools =
+      kMaxBranchOffset - 2 * kTrampolineSize;
+
+  List<Trampoline> trampolines_;
+
   friend class RegExpMacroAssemblerMIPS;
   friend class RelocInfo;
+  friend class CodePatcher;
+  friend class BlockTrampolinePoolScope;
+
+  PositionsRecorder positions_recorder_;
+  bool allow_peephole_optimization_;
+  bool emit_debug_code_;
+  friend class PositionsRecorder;
+  friend class EnsureSpace;
+};
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+  explicit EnsureSpace(Assembler* assembler) {
+    assembler->CheckBuffer();
+  }
 };
 
 } }  // namespace v8::internal
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 9532938..b4bab8e 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -33,6 +33,8 @@
 
 #include "codegen-inl.h"
 #include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
 #include "runtime.h"
 
 namespace v8 {
@@ -59,11 +61,21 @@
 }
 
 
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
 }
 
 
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
 }
@@ -74,111 +86,43 @@
 }
 
 
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
-                                             bool is_construct) {
-  // Called from JSEntryStub::GenerateBody
-
-  // Registers:
-  // a0: entry_address
-  // a1: function
-  // a2: reveiver_pointer
-  // a3: argc
-  // s0: argv
-  //
-  // Stack:
-  // arguments slots
-  // handler frame
-  // entry frame
-  // callee saved registers + ra
-  // 4 args slots
-  // args
-
-  // Clear the context before we push it when entering the JS frame.
-  __ li(cp, Operand(0, RelocInfo::NONE));
-
-  // Enter an internal frame.
-  __ EnterInternalFrame();
-
-  // Set up the context from the function argument.
-  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
-  // Set up the roots register.
-  ExternalReference roots_address = ExternalReference::roots_address();
-  __ li(s6, Operand(roots_address));
-
-  // Push the function and the receiver onto the stack.
-  __ MultiPushReversed(a1.bit() | a2.bit());
-
-  // Copy arguments to the stack in a loop.
-  // a3: argc
-  // s0: argv, ie points to first arg
-  Label loop, entry;
-  __ sll(t0, a3, kPointerSizeLog2);
-  __ add(t2, s0, t0);
-  __ b(&entry);
-  __ nop();   // Branch delay slot nop.
-  // t2 points past last arg.
-  __ bind(&loop);
-  __ lw(t0, MemOperand(s0));  // Read next parameter.
-  __ addiu(s0, s0, kPointerSize);
-  __ lw(t0, MemOperand(t0));  // Dereference handle.
-  __ Push(t0);  // Push parameter.
-  __ bind(&entry);
-  __ Branch(ne, &loop, s0, Operand(t2));
-
-  // Registers:
-  // a0: entry_address
-  // a1: function
-  // a2: reveiver_pointer
-  // a3: argc
-  // s0: argv
-  // s6: roots_address
-  //
-  // Stack:
-  // arguments
-  // receiver
-  // function
-  // arguments slots
-  // handler frame
-  // entry frame
-  // callee saved registers + ra
-  // 4 args slots
-  // args
-
-  // Initialize all JavaScript callee-saved registers, since they will be seen
-  // by the garbage collector as part of handlers.
-  __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
-  __ mov(s1, t4);
-  __ mov(s2, t4);
-  __ mov(s3, t4);
-  __ mov(s4, s4);
-  __ mov(s5, t4);
-  // s6 holds the root address. Do not clobber.
-  // s7 is cp. Do not init.
-
-  // Invoke the code and pass argc as a0.
-  __ mov(a0, a3);
-  if (is_construct) {
-    UNIMPLEMENTED_MIPS();
-    __ break_(0x164);
-  } else {
-    ParameterCount actual(a0);
-    __ InvokeFunction(a1, actual, CALL_FUNCTION);
-  }
-
-  __ LeaveInternalFrame();
-
-  __ Jump(ra);
-}
-
-
 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
-  Generate_JSEntryTrampolineHelper(masm, false);
+  UNIMPLEMENTED_MIPS();
 }
 
 
 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
-  Generate_JSEntryTrampolineHelper(masm, true);
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
 }
 
 
@@ -194,7 +138,6 @@
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
-  __ break_(0x201);
 }
 
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
new file mode 100644
index 0000000..6cc272c
--- /dev/null
+++ b/src/mips/code-stubs-mips.cc
@@ -0,0 +1,752 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "codegen-inl.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void ToNumberStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Takes a Smi and converts to an IEEE 64 bit floating point value in two
+// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
+// scratch register.  Destroys the source register.  No GC occurs during this
+// stub so you don't have to set up the frame.
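+// For example, the Smi 1 becomes the double 1.0, whose two words are
+// 0x3FF00000 (sign, exponent, high fraction) and 0x00000000 (low fraction).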
+class ConvertToDoubleStub : public CodeStub {
+ public:
+  ConvertToDoubleStub(Register result_reg_1,
+                      Register result_reg_2,
+                      Register source_reg,
+                      Register scratch_reg)
+      : result1_(result_reg_1),
+        result2_(result_reg_2),
+        source_(source_reg),
+        zeros_(scratch_reg) { }
+
+ private:
+  Register result1_;
+  Register result2_;
+  Register source_;
+  Register zeros_;
+
+  // Minor key encoding in 16 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 14> {};
+
+  Major MajorKey() { return ConvertToDouble; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return result1_.code() +
+           (result2_.code() << 4) +
+           (source_.code() << 8) +
+           (zeros_.code() << 12);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "ConvertToDoubleStub"; }
+
+#ifdef DEBUG
+  void Print() { PrintF("ConvertToDoubleStub\n"); }
+#endif
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+
+  enum Destination {
+    kFPURegisters,
+    kCoreRegisters
+  };
+
+
+  // Loads smis from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 will be
+  // scratched.
+  static void LoadSmis(MacroAssembler* masm,
+                       Destination destination,
+                       Register scratch1,
+                       Register scratch2);
+
+  // Loads objects from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 will still be
+  // scratched. If either a0 or a1 is not a number (neither a smi nor a heap
+  // number object), the not_number label is jumped to with a0 and a1 intact.
+  static void LoadOperands(MacroAssembler* masm,
+                           FloatingPointHelper::Destination destination,
+                           Register heap_number_map,
+                           Register scratch1,
+                           Register scratch2,
+                           Label* not_number);
+  // Loads the number from object into dst as a 32-bit integer if possible. If
+  // the object is not a 32-bit integer, control continues at the label
+  // not_int32. If FPU is supported, double_scratch is used but not scratch2.
+  static void LoadNumberAsInteger(MacroAssembler* masm,
+                                  Register object,
+                                  Register dst,
+                                  Register heap_number_map,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  FPURegister double_scratch,
+                                  Label* not_int32);
+ private:
+  static void LoadNumber(MacroAssembler* masm,
+                         FloatingPointHelper::Destination destination,
+                         Register object,
+                         FPURegister dst,
+                         Register dst1,
+                         Register dst2,
+                         Register heap_number_map,
+                         Register scratch1,
+                         Register scratch2,
+                         Label* not_number);
+};
+
+
+void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
+                                   FloatingPointHelper::Destination destination,
+                                   Register scratch1,
+                                   Register scratch2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::LoadOperands(
+    MacroAssembler* masm,
+    FloatingPointHelper::Destination destination,
+    Register heap_number_map,
+    Register scratch1,
+    Register scratch2,
+    Label* slow) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
+                                     Destination destination,
+                                     Register object,
+                                     FPURegister dst,
+                                     Register dst1,
+                                     Register dst2,
+                                     Register heap_number_map,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     Label* not_number) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
+                                              Register object,
+                                              Register dst,
+                                              Register heap_number_map,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              FPURegister double_scratch,
+                                              Label* not_int32) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// See comment for class; this does NOT work for int32s that are in Smi range.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void EmitNanCheck(MacroAssembler* masm, Condition cc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                                         Register object,
+                                                         Register result,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         Register scratch3,
+                                                         bool object_is_smi,
+                                                         Label* not_found) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
+// On exit, v0 is 0, positive, or negative (smi) to indicate the result
+// of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// This stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// We fall into this code if the operands were Smis, but the result was
+// not (e.g. overflow).  We branch into this code (to the not_smi label) if
+// the operands were not both Smi.  The operands are in lhs and rhs.
+// To call the C-implemented binary fp operation routines we need to end up
+// with the double precision floating point operands in a0 and a1 (for the
+// value in a1) and a2 and a3 (for the value in a0).
+void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
+                                    Label* not_smi,
+                                    Register lhs,
+                                    Register rhs,
+                                    const Builtins::JavaScript& builtin) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// For bitwise ops where the inputs are not both Smis, we try to determine
+// whether both inputs are either Smis or at least heap numbers that can be
+// represented by a 32-bit signed value.  We truncate towards zero as required
+// by the ES spec.  If this is the case we do the bitwise op and see if the
+// result is a Smi.  If so, great, otherwise we try to find a heap number to
+// write the answer into (either by allocating or by overwriting).
+// On entry the operands are in lhs (x) and rhs (y). (Result = x op y).
+// On exit the result is in v0.
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
+                                                Register lhs,
+                                                Register rhs) {
+  UNIMPLEMENTED_MIPS();
+}
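+
+
+// The truncation mentioned above follows ECMA-262 ToInt32 (section 9.5).
+// A minimal host-side sketch of that conversion, assuming an IEEE-754
+// double input; shown for illustration only, it is not part of the
+// generated code:
+//
+//   static int32_t ToInt32Sketch(double d) {
+//     if (isnan(d) || isinf(d) || d == 0) return 0;
+//     double t = (d < 0) ? ceil(d) : floor(d);  // Truncate towards zero.
+//     double m = fmod(t, 4294967296.0);         // Reduce modulo 2^32.
+//     if (m < 0) m += 4294967296.0;
+//     return (m >= 2147483648.0)
+//         ? static_cast<int32_t>(m - 4294967296.0)
+//         : static_cast<int32_t>(m);
+//   }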
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  GenericBinaryOpStub stub(key, type_info);
+  return stub.GetCode();
+}
+
+
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+    TRBinaryOpIC::TypeInfo type_info,
+    TRBinaryOpIC::TypeInfo result_type_info) {
+  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+  return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+    MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+  UNIMPLEMENTED_MIPS();
+  return name_;
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
+    MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+                                                    bool smi_operands,
+                                                    Label* not_numbers,
+                                                    Label* gc_required) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Generate the smi code. If the operation on smis is successful this return
+// is generated. If the result is not a smi and heap number allocation is not
+// requested the code falls through. If number allocation is requested but a
+// heap number cannot be allocated the code jumps to the label gc_required.
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+    Label* gc_required,
+    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+    MacroAssembler* masm,
+    Register result,
+    Register heap_number_map,
+    Register scratch1,
+    Register scratch2,
+    Label* gc_required) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+  UNIMPLEMENTED_MIPS();
+  return Runtime::kAbort;
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+  return true;
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_termination_exception,
+                              Label* throw_out_of_memory_exception,
+                              bool do_gc,
+                              bool always_allocate) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Uses registers a0 to t0. Expected input is
+// object in a0 (or at sp+1*kPointerSize) and function in
+// a1 (or at sp), depending on whether or not
+// args_in_registers() is true.
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+  UNIMPLEMENTED_MIPS();
+  return name_;
+}
+
+
+int CompareStub::MinorKey() {
+  UNIMPLEMENTED_MIPS();
+  return 0;
+}
+
+
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class StringHelper : public AllStatic {
+ public:
+  // Generate code for copying characters using a simple loop. This should only
+  // be used in places where the number of characters is small and the
+  // additional setup and checking in GenerateCopyCharactersLong adds too much
+  // overhead. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharacters(MacroAssembler* masm,
+                                     Register dest,
+                                     Register src,
+                                     Register count,
+                                     Register scratch,
+                                     bool ascii);
+
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharactersLong(MacroAssembler* masm,
+                                         Register dest,
+                                         Register src,
+                                         Register count,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Register scratch4,
+                                         Register scratch5,
+                                         int flags);
+
+
+  // Probe the symbol table for a two-character string. If the string is
+  // not found by probing, a jump to the label not_found is performed. This
+  // jump does not guarantee that the string is not in the symbol table. If
+  // the string is found, the code falls through with the string in register
+  // v0. The contents of both the c1 and c2 registers are modified. On exit,
+  // c1 is guaranteed to contain a halfword with its low and high bytes equal
+  // to the initial contents of c1 and c2 respectively.
+  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                   Register c1,
+                                                   Register c2,
+                                                   Register scratch1,
+                                                   Register scratch2,
+                                                   Register scratch3,
+                                                   Register scratch4,
+                                                   Register scratch5,
+                                                   Label* not_found);
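+  // For example (a worked sketch of the exit guarantee above): if c1
+  // initially holds 'a' (0x61) and c2 holds 'b' (0x62), then on exit c1
+  // holds the halfword 0x6261, i.e. (0x62 << 8) | 0x61.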
+
+  // Generate string hash.
+  static void GenerateHashInit(MacroAssembler* masm,
+                               Register hash,
+                               Register character);
+
+  static void GenerateHashAddCharacter(MacroAssembler* masm,
+                                       Register hash,
+                                       Register character);
+
+  static void GenerateHashGetHash(MacroAssembler* masm,
+                                  Register hash);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+                                          Register dest,
+                                          Register src,
+                                          Register count,
+                                          Register scratch,
+                                          bool ascii) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+enum CopyCharactersFlags {
+  COPY_ASCII = 1,
+  DEST_ALWAYS_ALIGNED = 2
+};
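+
+
+// For example (sketch only): a caller copying ASCII characters into a
+// word-aligned destination would pass COPY_ASCII | DEST_ALWAYS_ALIGNED as
+// the flags argument of GenerateCopyCharactersLong; either flag may also
+// be used alone, or 0 for the general case.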
+
+
+void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
+                                              Register dest,
+                                              Register src,
+                                              Register count,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              Register scratch4,
+                                              Register scratch5,
+                                              int flags) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                        Register c1,
+                                                        Register c2,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4,
+                                                        Register scratch5,
+                                                        Label* not_found) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+                                      Register hash,
+                                      Register character) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+                                              Register hash,
+                                              Register character) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+                                         Register hash) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                                        Register right,
+                                                        Register left,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements_map,
+                                Register elements,
+                                Register scratch1,
+                                Register scratch2,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
+
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
new file mode 100644
index 0000000..675730a
--- /dev/null
+++ b/src/mips/code-stubs-mips.h
@@ -0,0 +1,511 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_CODE_STUBS_MIPS_H_
+#define V8_MIPS_CODE_STUBS_MIPS_H_
+
+#include "ic-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+      : type_(type) {}
+  void Generate(MacroAssembler* masm);
+ private:
+  TranscendentalCache::Type type_;
+  Major MajorKey() { return TranscendentalCache; }
+  int MinorKey() { return type_; }
+  Runtime::FunctionId RuntimeFunction();
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+  explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register tos_;
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return tos_.code(); }
+};
+
+
+class GenericBinaryOpStub : public CodeStub {
+ public:
+  static const int kUnknownIntValue = -1;
+
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      Register lhs,
+                      Register rhs,
+                      int constant_rhs = kUnknownIntValue)
+      : op_(op),
+        mode_(mode),
+        lhs_(lhs),
+        rhs_(rhs),
+        constant_rhs_(constant_rhs),
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
+        name_(NULL) { }
+
+  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        lhs_(LhsRegister(RegisterBits::decode(key))),
+        rhs_(RhsRegister(RegisterBits::decode(key))),
+        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
+        runtime_operands_type_(type_info),
+        name_(NULL) { }
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  Register lhs_;
+  Register rhs_;
+  int constant_rhs_;
+  bool specialized_on_rhs_;
+  BinaryOpIC::TypeInfo runtime_operands_type_;
+  char* name_;
+
+  static const int kMaxKnownRhs = 0x40000000;
+  static const int kKnownRhsKeyBits = 6;
+
+  // Minor key encoding in 18 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 6> {};
+  class TypeInfoBits: public BitField<int, 8, 3> {};
+  class RegisterBits: public BitField<bool, 11, 1> {};
+  class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+           (lhs_.is(a1) && rhs_.is(a0)));
+    // Encode the parameters in a unique 18 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | KnownIntBits::encode(MinorKeyForKnownInt())
+           | TypeInfoBits::encode(runtime_operands_type_)
+           | RegisterBits::encode(lhs_.is(a0));
+  }
+
+  void Generate(MacroAssembler* masm);
+  void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+                             Register lhs,
+                             Register rhs);
+  void HandleBinaryOpSlowCases(MacroAssembler* masm,
+                               Label* not_smi,
+                               Register lhs,
+                               Register rhs,
+                               const Builtins::JavaScript& builtin);
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+    if (constant_rhs == kUnknownIntValue) return false;
+    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+    if (op == Token::MOD) {
+      if (constant_rhs <= 1) return false;
+      if (constant_rhs <= 10) return true;
+      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+      return false;
+    }
+    return false;
+  }
+
+  int MinorKeyForKnownInt() {
+    if (!specialized_on_rhs_) return 0;
+    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+    ASSERT(IsPowerOf2(constant_rhs_));
+    int key = 12;
+    int d = constant_rhs_;
+    while ((d & 1) == 0) {
+      key++;
+      d >>= 1;
+    }
+    ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
+    return key;
+  }
+
+  int KnownBitsForMinorKey(int key) {
+    if (!key) return 0;
+    if (key <= 11) return key - 1;
+    int d = 1;
+    while (key != 12) {
+      key--;
+      d <<= 1;
+    }
+    return d;
+  }
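+
+  // A worked example of the round-trip above (illustration only): for
+  // constant_rhs_ == 16 == 1 << 4, MinorKeyForKnownInt() returns
+  // key == 12 + 4 == 16, and KnownBitsForMinorKey(16) shifts 1 left
+  // (16 - 12) times to recover 16.  Small values take the other branch:
+  // constant_rhs_ == 7 encodes as key == 8 and decodes as 8 - 1 == 7.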
+
+  Register LhsRegister(bool lhs_is_a0) {
+    return lhs_is_a0 ? a0 : a1;
+  }
+
+  Register RhsRegister(bool lhs_is_a0) {
+    return lhs_is_a0 ? a1 : a0;
+  }
+
+  bool HasSmiSmiFastPath() {
+    return op_ != Token::DIV;
+  }
+
+  bool ShouldGenerateSmiCode() {
+    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
+        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+        runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  bool ShouldGenerateFPCode() {
+    return runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return BinaryOpIC::ToState(runtime_operands_type_);
+  }
+
+  const char* GetName();
+
+  virtual void FinishCode(Code* code) {
+    code->set_binary_op_type(runtime_operands_type_);
+  }
+
+#ifdef DEBUG
+  void Print() {
+    if (!specialized_on_rhs_) {
+      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+    } else {
+      PrintF("GenericBinaryOpStub (%s by %d)\n",
+             Token::String(op_),
+             constant_rhs_);
+    }
+  }
+#endif
+};
+
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(TRBinaryOpIC::UNINITIALIZED),
+        result_type_(TRBinaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  TypeRecordingBinaryOpStub(
+      int key,
+      TRBinaryOpIC::TypeInfo operands_type,
+      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        use_fpu_(FPUBits::decode(key)),
+        operands_type_(operands_type),
+        result_type_(result_type),
+        name_(NULL) { }
+
+ private:
+  enum SmiCodeGenerateHeapNumberResults {
+    ALLOW_HEAPNUMBER_RESULTS,
+    NO_HEAPNUMBER_RESULTS
+  };
+
+  Token::Value op_;
+  OverwriteMode mode_;
+  bool use_fpu_;
+
+  // Operand type information determined at runtime.
+  TRBinaryOpIC::TypeInfo operands_type_;
+  TRBinaryOpIC::TypeInfo result_type_;
+
+  char* name_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           TRBinaryOpIC::GetName(operands_type_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 7> {};
+  class FPUBits: public BitField<bool, 9, 1> {};
+  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
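+  // (Reading the layout right-to-left: bits 0-1 hold MM = mode, bits 2-8
+  // hold OOOOOOO = op, bit 9 holds V = use_fpu_, bits 10-12 hold TTT =
+  // the operand type and bits 13-15 hold RRR = the result type; 16 bits
+  // in total.)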
+
+  Major MajorKey() { return TypeRecordingBinaryOp; }
+  int MinorKey() {
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | FPUBits::encode(use_fpu_)
+           | OperandTypeInfoBits::encode(operands_type_)
+           | ResultTypeInfoBits::encode(result_type_);
+  }
+
+  void Generate(MacroAssembler* masm);
+  void GenerateGeneric(MacroAssembler* masm);
+  void GenerateSmiSmiOperation(MacroAssembler* masm);
+  void GenerateFPOperation(MacroAssembler* masm,
+                           bool smi_operands,
+                           Label* not_numbers,
+                           Label* gc_required);
+  void GenerateSmiCode(MacroAssembler* masm,
+                       Label* gc_required,
+                       SmiCodeGenerateHeapNumberResults heapnumber_results);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+  void GenerateUninitializedStub(MacroAssembler* masm);
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateInt32Stub(MacroAssembler* masm);
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateStringStub(MacroAssembler* masm);
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateAddStrings(MacroAssembler* masm);
+  void GenerateCallRuntime(MacroAssembler* masm);
+
+  void GenerateHeapResultAllocation(MacroAssembler* masm,
+                                    Register result,
+                                    Register heap_number_map,
+                                    Register scratch1,
+                                    Register scratch2,
+                                    Label* gc_required);
+  void GenerateRegisterArgsPush(MacroAssembler* masm);
+  void GenerateTypeTransition(MacroAssembler* masm);
+  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return TRBinaryOpIC::ToState(operands_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_type_recording_binary_op_type(operands_type_);
+    code->set_type_recording_binary_op_result_type(result_type_);
+  }
+
+  friend class CodeGenerator;
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+  NO_STRING_ADD_FLAGS = 0,
+  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+  explicit StringAddStub(StringAddFlags flags) {
+    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+  }
+
+ private:
+  Major MajorKey() { return StringAdd; }
+  int MinorKey() { return string_check_ ? 0 : 1; }
+
+  void Generate(MacroAssembler* masm);
+
+  // Should the stub check whether arguments are strings?
+  bool string_check_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+  SubStringStub() {}
+
+ private:
+  Major MajorKey() { return SubString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+  StringCompareStub() { }
+
+  // Compares two flat ASCII strings and returns the result in v0.
+  // Does not use the stack.
+  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                              Register left,
+                                              Register right,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              Register scratch4);
+
+ private:
+  Major MajorKey() { return StringCompare; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
+// This stub can convert a signed int32 to a heap number (double).  It does
+// not work for int32s that are in Smi range!  No GC occurs during this stub,
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+  WriteInt32ToHeapNumberStub(Register the_int,
+                             Register the_heap_number,
+                             Register scratch,
+                             Register scratch2)
+      : the_int_(the_int),
+        the_heap_number_(the_heap_number),
+        scratch_(scratch),
+        sign_(scratch2) { }
+
+ private:
+  Register the_int_;
+  Register the_heap_number_;
+  Register scratch_;
+  Register sign_;
+
+  // Minor key encoding in 16 bits.
+  class IntRegisterBits: public BitField<int, 0, 4> {};
+  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+  class ScratchRegisterBits: public BitField<int, 8, 4> {};
+
+  Major MajorKey() { return WriteInt32ToHeapNumber; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return IntRegisterBits::encode(the_int_.code())
+           | HeapNumberRegisterBits::encode(the_heap_number_.code())
+           | ScratchRegisterBits::encode(scratch_.code());
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
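+
+
+// As a concrete example of what the stub above must produce (a sketch of
+// the IEEE-754 encoding, not of the stub's code): the int32 value -1
+// becomes the double -1.0, whose heap number value bits are the 64-bit
+// pattern 0xBFF0000000000000 (sign 1, biased exponent 0x3FF, mantissa 0).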
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+  NumberToStringStub() { }
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the object register is found in the cache the generated code falls through
+  // with the result in the result register. The object and result registers
+  // can be the same. If the number is not found in the cache the code jumps to
+  // the label not_found, with only the content of the object register
+  // unchanged.
+  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                              Register object,
+                                              Register result,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              bool object_is_smi,
+                                              Label* not_found);
+
+ private:
+  Major MajorKey() { return NumberToString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("NumberToStringStub\n");
+  }
+#endif
+};
+
+
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM and MIPS.
+class RegExpCEntryStub: public CodeStub {
+ public:
+  RegExpCEntryStub() {}
+  virtual ~RegExpCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return RegExpCEntry; }
+  int MinorKey() { return 0; }
+
+  bool NeedsImmovableCode() { return true; }
+
+  const char* GetName() { return "RegExpCEntryStub"; }
+};
+
+
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee
+// this precondition. If the receiver does not have elements that are pixel
+// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
+// then the generated code branches to key_not_smi. Callers can specify NULL
+// for key_not_smi to signal that a smi check has already been performed on
+// key so that the smi check is not generated. If key is not a valid index
+// within the bounds of the pixel array, the generated code jumps to
+// out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements_map,
+                                Register elements,
+                                Register scratch1,
+                                Register scratch2,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range);
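+
+
+// A usage sketch (register choices and label names here are illustrative,
+// not prescribed by this header): a keyed-load stub that has already
+// smi-checked the key can pass NULL for key_not_smi:
+//
+//   GenerateFastPixelArrayLoad(masm, receiver, key, a3, t0, t1, t2, v0,
+//                              &miss, NULL, &miss);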
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_CODE_STUBS_MIPS_H_
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
index 3a511b8..be9ae9e 100644
--- a/src/mips/codegen-mips-inl.h
+++ b/src/mips/codegen-mips-inl.h
@@ -29,6 +29,8 @@
 #ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
 #define V8_MIPS_CODEGEN_MIPS_INL_H_
 
+#include "virtual-frame-mips.h"
+
 namespace v8 {
 namespace internal {
 
@@ -42,26 +44,18 @@
 }
 
 
+// Note: this has been hacked for submission. MIPS branches require two
+// additional operands: Register src1, const Operand& src2.
+void DeferredCode::Branch(Condition cond) {
+  __ Branch(&entry_label_, cond, zero_reg, Operand(0));
+}
+
+
 void Reference::GetValueAndSpill() {
   GetValue();
 }
 
 
-void CodeGenerator::VisitAndSpill(Statement* statement) {
-  Visit(statement);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-  VisitStatements(statements);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
-  Load(expression);
-}
-
-
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 79801f0..c1149df 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -31,36 +31,62 @@
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "bootstrapper.h"
+#include "code-stubs.h"
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
 #include "ic-inl.h"
+#include "jsregexp.h"
+#include "jump-target-inl.h"
 #include "parser.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
 #include "register-allocator-inl.h"
 #include "runtime.h"
 #include "scopes.h"
+#include "stub-cache.h"
 #include "virtual-frame-inl.h"
-
-
+#include "virtual-frame-mips-inl.h"
 
 namespace v8 {
 namespace internal {
 
+
 #define __ ACCESS_MASM(masm_)
 
-
-
-// -----------------------------------------------------------------------------
+// -------------------------------------------------------------------------
 // Platform-specific DeferredCode functions.
 
-
 void DeferredCode::SaveRegisters() {
-  UNIMPLEMENTED_MIPS();
+  // On MIPS you either have a completely spilled frame or you
+  // handle it yourself, but at the moment there's no automation
+  // of registers and deferred code.
 }
 
 
 void DeferredCode::RestoreRegisters() {
-  UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+  frame_state_->frame()->AssertIsSpilled();
+}
+
+
+void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+}
+
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+  masm->EnterInternalFrame();
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+  masm->LeaveInternalFrame();
 }
 
 
@@ -69,21 +95,28 @@
 
 CodeGenState::CodeGenState(CodeGenerator* owner)
     : owner_(owner),
-      true_target_(NULL),
-      false_target_(NULL),
-      previous_(NULL) {
-  owner_->set_state(this);
+      previous_(owner->state()) {
+  owner->set_state(this);
 }
 
 
-CodeGenState::CodeGenState(CodeGenerator* owner,
+ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
                            JumpTarget* true_target,
                            JumpTarget* false_target)
-    : owner_(owner),
+    : CodeGenState(owner),
       true_target_(true_target),
-      false_target_(false_target),
-      previous_(owner->state()) {
-  owner_->set_state(this);
+      false_target_(false_target) {
+  owner->set_state(this);
+}
+
+
+TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
+                                           Slot* slot,
+                                           TypeInfo type_info)
+    : CodeGenState(owner),
+      slot_(slot) {
+  owner->set_state(this);
+  old_type_info_ = owner->set_type_info(slot, type_info);
 }
 
 
@@ -93,16 +126,25 @@
 }
 
 
+TypeInfoCodeGenState::~TypeInfoCodeGenState() {
+  owner()->set_type_info(slot_, old_type_info_);
+}
+
+
 // -----------------------------------------------------------------------------
-// CodeGenerator implementation
+// CodeGenerator implementation.
 
 CodeGenerator::CodeGenerator(MacroAssembler* masm)
     : deferred_(8),
       masm_(masm),
+      info_(NULL),
       frame_(NULL),
       allocator_(NULL),
       cc_reg_(cc_always),
       state_(NULL),
+      loop_nesting_(0),
+      type_info_(NULL),
+      function_return_(JumpTarget::BIDIRECTIONAL),
       function_return_is_shadowed_(false) {
 }
 
@@ -114,356 +156,249 @@
 // cp: callee's context
 
 void CodeGenerator::Generate(CompilationInfo* info) {
-  // Record the position for debugging purposes.
-  CodeForFunctionPosition(info->function());
-
-  // Initialize state.
-  info_ = info;
-  ASSERT(allocator_ == NULL);
-  RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
-  ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame();
-  cc_reg_ = cc_always;
-
-  {
-    CodeGenState state(this);
-
-    // Registers:
-    // a1: called JS function
-    // ra: return address
-    // fp: caller's frame pointer
-    // sp: stack pointer
-    // cp: callee's context
-    //
-    // Stack:
-    // arguments
-    // receiver
-
-    frame_->Enter();
-
-    // Allocate space for locals and initialize them.
-    frame_->AllocateStackSlots();
-
-    // Initialize the function return target.
-    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
-    function_return_is_shadowed_ = false;
-
-    VirtualFrame::SpilledScope spilled_scope;
-    if (scope()->num_heap_slots() > 0) {
-      UNIMPLEMENTED_MIPS();
-    }
-
-    {
-      Comment cmnt2(masm_, "[ copy context parameters into .context");
-
-      // Note that iteration order is relevant here! If we have the same
-      // parameter twice (e.g., function (x, y, x)), and that parameter
-      // needs to be copied into the context, it must be the last argument
-      // passed to the parameter that needs to be copied. This is a rare
-      // case so we don't check for it, instead we rely on the copying
-      // order: such a parameter is copied repeatedly into the same
-      // context location and thus the last value is what is seen inside
-      // the function.
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        UNIMPLEMENTED_MIPS();
-      }
-    }
-
-    // Store the arguments object.  This must happen after context
-    // initialization because the arguments object may be stored in the
-    // context.
-    if (scope()->arguments() != NULL) {
-      UNIMPLEMENTED_MIPS();
-    }
-
-    // Generate code to 'execute' declarations and initialize functions
-    // (source elements). In case of an illegal redeclaration we need to
-    // handle that instead of processing the declarations.
-    if (scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ illegal redeclarations");
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope()->declarations());
-      // Bail out if a stack-overflow exception occurred when processing
-      // declarations.
-      if (HasStackOverflow()) return;
-    }
-
-    if (FLAG_trace) {
-      UNIMPLEMENTED_MIPS();
-    }
-
-    // Compile the body of the function in a vanilla state. Don't
-    // bother compiling all the code if the scope has an illegal
-    // redeclaration.
-    if (!scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
-      bool is_builtin = Bootstrapper::IsActive();
-      bool should_trace =
-          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
-      if (should_trace) {
-        UNIMPLEMENTED_MIPS();
-      }
-#endif
-      VisitStatementsAndSpill(info->function()->body());
-    }
-  }
-
-  if (has_valid_frame() || function_return_.is_linked()) {
-    if (!function_return_.is_linked()) {
-      CodeForReturnPosition(info->function());
-    }
-    // Registers:
-    // v0: result
-    // sp: stack pointer
-    // fp: frame pointer
-    // cp: callee's context
-
-    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-
-    function_return_.Bind();
-    if (FLAG_trace) {
-      UNIMPLEMENTED_MIPS();
-    }
-
-    // Add a label for checking the size of the code used for returning.
-    Label check_exit_codesize;
-    masm_->bind(&check_exit_codesize);
-
-    masm_->mov(sp, fp);
-    masm_->lw(fp, MemOperand(sp, 0));
-    masm_->lw(ra, MemOperand(sp, 4));
-    masm_->addiu(sp, sp, 8);
-
-    // Here we use masm_-> instead of the __ macro to avoid the code coverage
-    // tool from instrumenting as we rely on the code size here.
-    // TODO(MIPS): Should we be able to use more than 0x1ffe parameters?
-    masm_->addiu(sp, sp, (scope()->num_parameters() + 1) * kPointerSize);
-    masm_->Jump(ra);
-    // The Jump automatically generates a nop in the branch delay slot.
-
-    // Check that the size of the code used for returning matches what is
-    // expected by the debugger.
-    ASSERT_EQ(kJSReturnSequenceLength,
-              masm_->InstructionsGeneratedSince(&check_exit_codesize));
-  }
-
-  // Code generation state must be reset.
-  ASSERT(!has_cc());
-  ASSERT(state_ == NULL);
-  ASSERT(!function_return_is_shadowed_);
-  function_return_.Unuse();
-  DeleteFrame();
-
-  // Process any deferred code using the register allocator.
-  if (!HasStackOverflow()) {
-    ProcessDeferred();
-  }
-
-  allocator_ = NULL;
+  UNIMPLEMENTED_MIPS();
 }
 
 
-void CodeGenerator::LoadReference(Reference* ref) {
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ LoadReference");
-  Expression* e = ref->expression();
-  Property* property = e->AsProperty();
-  Variable* var = e->AsVariableProxy()->AsVariable();
-
-  if (property != NULL) {
-    UNIMPLEMENTED_MIPS();
-  } else if (var != NULL) {
-    // The expression is a variable proxy that does not rewrite to a
-    // property.  Global variables are treated as named property references.
-    if (var->is_global()) {
-      LoadGlobal();
-      ref->set_type(Reference::NAMED);
-    } else {
-      ASSERT(var->slot() != NULL);
-      ref->set_type(Reference::SLOT);
-    }
-  } else {
-    UNIMPLEMENTED_MIPS();
-  }
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
-  VirtualFrame::SpilledScope spilled_scope;
-  // Pop a reference from the stack while preserving TOS.
-  Comment cmnt(masm_, "[ UnloadReference");
-  int size = ref->size();
-  if (size > 0) {
-    frame_->EmitPop(a0);
-    frame_->Drop(size);
-    frame_->EmitPush(a0);
-  }
-  ref->set_unloaded();
+int CodeGenerator::NumberOfSlot(Slot* slot) {
+  UNIMPLEMENTED_MIPS();
+  return 0;
 }
 
 
 MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  // Currently, this assertion will fail if we try to assign to
-  // a constant variable that is constant because it is read-only
-  // (such as the variable referring to a named function expression).
-  // We need to implement assignments to read-only variables.
-  // Ideally, we should do this during AST generation (by converting
-  // such assignments into expression statements); however, in general
-  // we may not be able to make the decision until past AST generation,
-  // that is when the entire program is known.
-  ASSERT(slot != NULL);
-  int index = slot->index();
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      UNIMPLEMENTED_MIPS();
-      return MemOperand(no_reg, 0);
-
-    case Slot::LOCAL:
-      return frame_->LocalAt(index);
-
-    case Slot::CONTEXT: {
-      UNIMPLEMENTED_MIPS();
-      return MemOperand(no_reg, 0);
-    }
-
-    default:
-      UNREACHABLE();
-      return MemOperand(no_reg, 0);
-  }
+  UNIMPLEMENTED_MIPS();
+  return MemOperand(zero_reg, 0);
 }
 
 
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
+MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
+    Slot* slot,
+    Register tmp,
+    Register tmp2,
+    JumpTarget* slow) {
+  UNIMPLEMENTED_MIPS();
+  return MemOperand(zero_reg, 0);
+}
+
+
 void CodeGenerator::LoadCondition(Expression* x,
                                   JumpTarget* true_target,
                                   JumpTarget* false_target,
                                   bool force_cc) {
-  ASSERT(!has_cc());
-  int original_height = frame_->height();
-
-  { CodeGenState new_state(this, true_target, false_target);
-    Visit(x);
-
-    // If we hit a stack overflow, we may not have actually visited
-    // the expression. In that case, we ensure that we have a
-    // valid-looking frame state because we will continue to generate
-    // code as we unwind the C++ stack.
-    //
-    // It's possible to have both a stack overflow and a valid frame
-    // state (eg, a subexpression overflowed, visiting it returned
-    // with a dummied frame state, and visiting this expression
-    // returned with a normal-looking state).
-    if (HasStackOverflow() &&
-        has_valid_frame() &&
-        !has_cc() &&
-        frame_->height() == original_height) {
-      true_target->Jump();
-    }
-  }
-  if (force_cc && frame_ != NULL && !has_cc()) {
-    // Convert the TOS value to a boolean in the condition code register.
-    UNIMPLEMENTED_MIPS();
-  }
-  ASSERT(!force_cc || !has_valid_frame() || has_cc());
-  ASSERT(!has_valid_frame() ||
-         (has_cc() && frame_->height() == original_height) ||
-         (!has_cc() && frame_->height() == original_height + 1));
+  UNIMPLEMENTED_MIPS();
 }
 
 
 void CodeGenerator::Load(Expression* x) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  JumpTarget true_target;
-  JumpTarget false_target;
-  LoadCondition(x, &true_target, &false_target, false);
-
-  if (has_cc()) {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  if (true_target.is_linked() || false_target.is_linked()) {
-    UNIMPLEMENTED_MIPS();
-  }
-  ASSERT(has_valid_frame());
-  ASSERT(!has_cc());
-  ASSERT(frame_->height() == original_height + 1);
+  UNIMPLEMENTED_MIPS();
 }
 
 
 void CodeGenerator::LoadGlobal() {
-  VirtualFrame::SpilledScope spilled_scope;
-  __ lw(a0, GlobalObject());
-  frame_->EmitPush(a0);
+  UNIMPLEMENTED_MIPS();
 }
 
 
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  VirtualFrame::SpilledScope spilled_scope;
-  if (slot->type() == Slot::LOOKUP) {
-    UNIMPLEMENTED_MIPS();
-  } else {
-    __ lw(a0, SlotOperand(slot, a2));
-    frame_->EmitPush(a0);
-    if (slot->var()->mode() == Variable::CONST) {
-      UNIMPLEMENTED_MIPS();
-    }
-  }
+void CodeGenerator::LoadGlobalReceiver(Register scratch) {
+  UNIMPLEMENTED_MIPS();
 }
 
 
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  ASSERT(slot != NULL);
-  if (slot->type() == Slot::LOOKUP) {
-      UNIMPLEMENTED_MIPS();
-  } else {
-    ASSERT(!slot->var()->is_dynamic());
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+  UNIMPLEMENTED_MIPS();
+  return EAGER_ARGUMENTS_ALLOCATION;
+}
 
-    JumpTarget exit;
-    if (init_state == CONST_INIT) {
-      UNIMPLEMENTED_MIPS();
-    }
 
-    // We must execute the store. Storing a variable must keep the
-    // (new) value on the stack. This is necessary for compiling
-    // assignment expressions.
-    //
-    // Note: We will reach here even with slot->var()->mode() ==
-    // Variable::CONST because of const declarations which will
-    // initialize consts to 'the hole' value and by doing so, end up
-    // calling this code. a2 may be loaded with context; used below in
-    // RecordWrite.
-    frame_->EmitPop(a0);
-    __ sw(a0, SlotOperand(slot, a2));
-    frame_->EmitPush(a0);
-    if (slot->type() == Slot::CONTEXT) {
-      UNIMPLEMENTED_MIPS();
-    }
-    // If we definitely did not jump over the assignment, we do not need
-    // to bind the exit label. Doing so can defeat peephole
-    // optimization.
-    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
-      exit.Bind();
-    }
+void CodeGenerator::StoreArgumentsObject(bool initial) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Reference::Reference(CodeGenerator* cgen,
+                     Expression* expression,
+                     bool persist_after_get)
+    : cgen_(cgen),
+      expression_(expression),
+      type_(ILLEGAL),
+      persist_after_get_(persist_after_get) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Reference::~Reference() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
+// register to a boolean in the condition code register. The code
+// may jump to 'false_target' in case the register converts to 'false'.
+void CodeGenerator::ToBoolean(JumpTarget* true_target,
+                              JumpTarget* false_target) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+                                           OverwriteMode overwrite_mode,
+                                           GenerateInlineSmi inline_smi,
+                                           int constant_rhs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+  DeferredInlineSmiOperation(Token::Value op,
+                             int value,
+                             bool reversed,
+                             OverwriteMode overwrite_mode,
+                             Register tos)
+      : op_(op),
+        value_(value),
+        reversed_(reversed),
+        overwrite_mode_(overwrite_mode),
+        tos_register_(tos) {
+    set_comment("[ DeferredInlineSmiOperation");
   }
+
+  virtual void Generate();
+  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
+  // Exit(). Currently on MIPS SaveRegisters() and RestoreRegisters() are empty
+  // methods, so it is the responsibility of the deferred code to save and
+  // restore registers.
+  virtual bool AutoSaveAndRestore() { return false; }
+
+  void JumpToNonSmiInput(Condition cond, Register cmp1, const Operand& cmp2);
+  void JumpToAnswerOutOfRange(Condition cond,
+                              Register cmp1,
+                              const Operand& cmp2);
+
+ private:
+  void GenerateNonSmiInput();
+  void GenerateAnswerOutOfRange();
+  void WriteNonSmiAnswer(Register answer,
+                         Register heap_number,
+                         Register scratch);
+
+  Token::Value op_;
+  int value_;
+  bool reversed_;
+  OverwriteMode overwrite_mode_;
+  Register tos_register_;
+  Label non_smi_input_;
+  Label answer_out_of_range_;
+};
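+
+
+// A usage sketch for the class above, assuming the usual deferred-code
+// pattern (illustrative only; the MIPS fast paths are still unimplemented):
+//
+//   DeferredInlineSmiOperation* deferred =
+//       new DeferredInlineSmiOperation(Token::BIT_OR, 1, false, mode, tos);
+//   __ And(scratch, tos, Operand(kSmiTagMask));
+//   deferred->JumpToNonSmiInput(ne, scratch, Operand(zero_reg));
+//   ... inline smi fast path, leaving the answer in tos ...
+//   deferred->BindExit();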
+
+
+// For bit operations we try harder and handle the case where the input is not
+// a Smi but a 32-bit integer without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond,
+                                                   Register cmp1,
+                                                   const Operand& cmp2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// For bit operations the result is always 32 bits, so we handle the case where
+// the result does not fit in a Smi without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond,
+                                                        Register cmp1,
+                                                        const Operand& cmp2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// On entry the non-constant side of the binary operation is in tos_register_
+// and the constant smi side is held in value_, not in a register.  The
+// tos_register_ is not used by the virtual frame.  On exit the answer is in
+// the tos_register_ and the virtual frame is unchanged.
+void DeferredInlineSmiOperation::Generate() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Convert and write the integer answer into heap_number.
+void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
+                                                   Register heap_number,
+                                                   Register scratch) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void DeferredInlineSmiOperation::GenerateNonSmiInput() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::SmiOperation(Token::Value op,
+                                 Handle<Object> value,
+                                 bool reversed,
+                                 OverwriteMode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// On MIPS we load registers condReg1 and condReg2 with the values which
+// should be compared. Together with the CodeGenerator::cc_reg_ condition,
+// functions such as CodeGenerator::Branch can then evaluate the condition
+// correctly.
+void CodeGenerator::Comparison(Condition cc,
+                               Expression* left,
+                               Expression* right,
+                               bool strict) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+                                      CallFunctionFlags flags,
+                                      int position) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::CallApplyLazy(Expression* applicand,
+                                  Expression* receiver,
+                                  VariableProxy* arguments,
+                                  int position) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::CheckStack() {
+  UNIMPLEMENTED_MIPS();
 }
 
 
 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-  VirtualFrame::SpilledScope spilled_scope;
-  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
-    VisitAndSpill(statements->at(i));
-  }
+  UNIMPLEMENTED_MIPS();
 }
 
 
@@ -473,14 +408,7 @@
 
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  VirtualFrame::SpilledScope spilled_scope;
-  frame_->EmitPush(cp);
-  __ li(t0, Operand(pairs));
-  frame_->EmitPush(t0);
-  __ li(t0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
-  frame_->EmitPush(t0);
-  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
-  // The result is discarded.
+  UNIMPLEMENTED_MIPS();
 }
 
 
@@ -490,17 +418,7 @@
 
 
 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatementPosition(node);
-  Expression* expression = node->expression();
-  expression->MarkAsStatement();
-  LoadAndSpill(expression);
-  frame_->Drop();
-  ASSERT(frame_->height() == original_height);
+  UNIMPLEMENTED_MIPS();
 }
 
 
@@ -525,22 +443,12 @@
 
 
 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ ReturnStatement");
+  UNIMPLEMENTED_MIPS();
+}
 
-  CodeForStatementPosition(node);
-  LoadAndSpill(node->expression());
-  if (function_return_is_shadowed_) {
-    frame_->EmitPop(v0);
-    function_return_.Jump();
-  } else {
-    // Pop the result from the frame and prepare the frame for
-    // returning thus making it easier to merge.
-    frame_->EmitPop(v0);
-    frame_->PrepareForReturn();
 
-    function_return_.Jump();
-  }
+void CodeGenerator::GenerateReturnSequence() {
+  UNIMPLEMENTED_MIPS();
 }
 
 
@@ -594,6 +502,13 @@
 }
 
 
+void CodeGenerator::InstantiateFunction(
+    Handle<SharedFunctionInfo> function_info,
+    bool pretenure) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
   UNIMPLEMENTED_MIPS();
 }
@@ -610,46 +525,49 @@
 }
 
 
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+                                                  TypeofState state) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
+                                                      TypeofState typeof_state,
+                                                      JumpTarget* slow) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+                                                    TypeofState typeof_state,
+                                                    JumpTarget* slow,
+                                                    JumpTarget* done) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void CodeGenerator::VisitSlot(Slot* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ Slot");
-  LoadFromSlot(node, typeof_state());
-  ASSERT(frame_->height() == original_height + 1);
+  UNIMPLEMENTED_MIPS();
 }
 
 
 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ VariableProxy");
-
-  Variable* var = node->var();
-  Expression* expr = var->rewrite();
-  if (expr != NULL) {
-    Visit(expr);
-  } else {
-    ASSERT(var->is_global());
-    Reference ref(this, node);
-    ref.GetValueAndSpill();
-  }
-  ASSERT(frame_->height() == original_height + 1);
+  UNIMPLEMENTED_MIPS();
 }
 
 
 void CodeGenerator::VisitLiteral(Literal* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ Literal");
-  __ li(t0, Operand(node->handle()));
-  frame_->EmitPush(t0);
-  ASSERT(frame_->height() == original_height + 1);
+  UNIMPLEMENTED_MIPS();
 }
 
 
@@ -673,48 +591,23 @@
 }
 
 
+void CodeGenerator::EmitSlotAssignment(Assignment* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ Assignment");
-
-  { Reference target(this, node->target());
-    if (target.is_illegal()) {
-      // Fool the virtual frame into thinking that we left the assignment's
-      // value on the frame.
-      frame_->EmitPush(zero_reg);
-      ASSERT(frame_->height() == original_height + 1);
-      return;
-    }
-
-    if (node->op() == Token::ASSIGN ||
-        node->op() == Token::INIT_VAR ||
-        node->op() == Token::INIT_CONST) {
-      LoadAndSpill(node->value());
-    } else {
-      UNIMPLEMENTED_MIPS();
-    }
-
-    Variable* var = node->target()->AsVariableProxy()->AsVariable();
-    if (var != NULL &&
-        (var->mode() == Variable::CONST) &&
-        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
-      // Assignment ignored - leave the value on the stack.
-    } else {
-      CodeForSourcePosition(node->position());
-      if (node->op() == Token::INIT_CONST) {
-        // Dynamic constant initializations must use the function context
-        // and initialize the actual constant declared. Dynamic variable
-        // initializations are simply assignments and use SetValue.
-        target.SetValue(CONST_INIT);
-      } else {
-        target.SetValue(NOT_CONST_INIT);
-      }
-    }
-  }
-  ASSERT(frame_->height() == original_height + 1);
+  UNIMPLEMENTED_MIPS();
 }
 
 
@@ -729,73 +622,7 @@
 
 
 void CodeGenerator::VisitCall(Call* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ Call");
-
-  Expression* function = node->expression();
-  ZoneList<Expression*>* args = node->arguments();
-
-  // Standard function call.
-  // Check if the function is a variable or a property.
-  Variable* var = function->AsVariableProxy()->AsVariable();
-  Property* property = function->AsProperty();
-
-  // ------------------------------------------------------------------------
-  // Fast-case: Use inline caching.
-  // ---
-  // According to ECMA-262, section 11.2.3, page 44, the function to call
-  // must be resolved after the arguments have been evaluated. The IC code
-  // automatically handles this by loading the arguments before the function
-  // is resolved in cache misses (this also holds for megamorphic calls).
-  // ------------------------------------------------------------------------
-
-  if (var != NULL && var->is_possibly_eval()) {
-    UNIMPLEMENTED_MIPS();
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
-    // ----------------------------------
-
-    int arg_count = args->length();
-
-    // We need sp to be 8 bytes aligned when calling the stub.
-    __ SetupAlignedCall(t0, arg_count);
-
-    // Pass the global object as the receiver and let the IC stub
-    // patch the stack to use the global proxy as 'this' in the
-    // invoked function.
-    LoadGlobal();
-
-    // Load the arguments.
-    for (int i = 0; i < arg_count; i++) {
-      LoadAndSpill(args->at(i));
-    }
-
-    // Setup the receiver register and call the IC initialization code.
-    __ li(a2, Operand(var->name()));
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
-    CodeForSourcePosition(node->position());
-    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
-                           arg_count + 1);
-    __ ReturnFromAlignedCall();
-    __ lw(cp, frame_->Context());
-    // Remove the function from the stack.
-    frame_->EmitPush(v0);
-
-  } else if (var != NULL && var->slot() != NULL &&
-             var->slot()->type() == Slot::LOOKUP) {
-    UNIMPLEMENTED_MIPS();
-  } else if (property != NULL) {
-    UNIMPLEMENTED_MIPS();
-  } else {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  ASSERT(frame_->height() == original_height + 1);
+  UNIMPLEMENTED_MIPS();
 }
 
 
@@ -839,30 +666,112 @@
 }
 
 
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-// This should generate code that performs a charCodeAt() call or returns
-// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It is not yet implemented on ARM, so it always goes to the slow case.
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+  DeferredStringCharCodeAt(Register object,
+                           Register index,
+                           Register scratch,
+                           Register result)
+      : result_(result),
+        char_code_at_generator_(object,
+                                index,
+                                scratch,
+                                result,
+                                &need_conversion_,
+                                &need_conversion_,
+                                &index_out_of_range_,
+                                STRING_INDEX_IS_NUMBER) {}
+
+  StringCharCodeAtGenerator* fast_case_generator() {
+    return &char_code_at_generator_;
+  }
+
+  virtual void Generate() {
+    UNIMPLEMENTED_MIPS();
+  }
+
+ private:
+  Register result_;
+
+  Label need_conversion_;
+  Label index_out_of_range_;
+
+  StringCharCodeAtGenerator char_code_at_generator_;
+};
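+// Note: a sketch of the intended use, mirroring DeferredStringCharFromCode
+// below: the code generator emits the fast case inline via
+// fast_case_generator() and falls back to this deferred Generate() for the
+// conversion and index-out-of-range slow paths (still unimplemented here).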
+
+
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+  DeferredStringCharFromCode(Register code,
+                             Register result)
+      : char_from_code_generator_(code, result) {}
+
+  StringCharFromCodeGenerator* fast_case_generator() {
+    return &char_from_code_generator_;
+  }
+
+  virtual void Generate() {
+    VirtualFrameRuntimeCallHelper call_helper(frame_state());
+    char_from_code_generator_.GenerateSlow(masm(), call_helper);
+  }
+
+ private:
+  StringCharFromCodeGenerator char_from_code_generator_;
+};
+
+
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredStringCharAt : public DeferredCode {
+ public:
+  DeferredStringCharAt(Register object,
+                       Register index,
+                       Register scratch1,
+                       Register scratch2,
+                       Register result)
+      : result_(result),
+        char_at_generator_(object,
+                           index,
+                           scratch1,
+                           scratch2,
+                           result,
+                           &need_conversion_,
+                           &need_conversion_,
+                           &index_out_of_range_,
+                           STRING_INDEX_IS_NUMBER) {}
+
+  StringCharAtGenerator* fast_case_generator() {
+    return &char_at_generator_;
+  }
+
+  virtual void Generate() {
+    UNIMPLEMENTED_MIPS();
+  }
+
+ private:
+  Register result_;
+
+  Label need_conversion_;
+  Label index_out_of_range_;
+
+  StringCharAtGenerator char_at_generator_;
+};
+
+
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -877,6 +786,55 @@
 }
 
 
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
+ public:
+  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
+                                               Register map_result,
+                                               Register scratch1,
+                                               Register scratch2)
+      : object_(object),
+        map_result_(map_result),
+        scratch1_(scratch1),
+        scratch2_(scratch2) { }
+
+  virtual void Generate() {
+    UNIMPLEMENTED_MIPS();
+  }
+
+ private:
+  Register object_;
+  Register map_result_;
+  Register scratch1_;
+  Register scratch2_;
+};
+
+
+void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
+    ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
   UNIMPLEMENTED_MIPS();
 }
@@ -892,32 +850,8 @@
 }
 
 
-void CodeGenerator::GenerateRandomHeapNumber(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateRandomHeapNumber(ZoneList<Expression*>* args) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -942,11 +876,109 @@
 }
 
 
+void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredSearchCache: public DeferredCode {
+ public:
+  DeferredSearchCache(Register dst, Register cache, Register key)
+      : dst_(dst), cache_(cache), key_(key) {
+    set_comment("[ DeferredSearchCache");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_, cache_, key_;
+};
+
+
+void DeferredSearchCache::Generate() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
   UNIMPLEMENTED_MIPS();
 }
 
 
+class DeferredSwapElements: public DeferredCode {
+ public:
+  DeferredSwapElements(Register object, Register index1, Register index2)
+      : object_(object), index1_(index1), index2_(index2) {
+    set_comment("[ DeferredSwapElements");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
   UNIMPLEMENTED_MIPS();
 }
@@ -957,11 +989,39 @@
 }
 
 
+class DeferredCountOperation: public DeferredCode {
+ public:
+  DeferredCountOperation(Register value,
+                         bool is_increment,
+                         bool is_postfix,
+                         int target_size)
+      : value_(value),
+        is_increment_(is_increment),
+        is_postfix_(is_postfix),
+        target_size_(target_size) {}
+
+  virtual void Generate() {
+    UNIMPLEMENTED_MIPS();
+  }
+
+ private:
+  Register value_;
+  bool is_increment_;
+  bool is_postfix_;
+  int target_size_;
+};
+
+
 void CodeGenerator::VisitCountOperation(CountOperation* node) {
   UNIMPLEMENTED_MIPS();
 }
 
 
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
   UNIMPLEMENTED_MIPS();
 }
@@ -977,8 +1037,138 @@
 }
 
 
+void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredReferenceGetNamedValue: public DeferredCode {
+ public:
+  DeferredReferenceGetNamedValue(Register receiver,
+                                 Handle<String> name,
+                                 bool is_contextual)
+      : receiver_(receiver),
+        name_(name),
+        is_contextual_(is_contextual),
+        is_dont_delete_(false) {
+    set_comment(is_contextual
+                ? "[ DeferredReferenceGetNamedValue (contextual)"
+                : "[ DeferredReferenceGetNamedValue");
+  }
+
+  virtual void Generate();
+
+  void set_is_dont_delete(bool value) {
+    ASSERT(is_contextual_);
+    is_dont_delete_ = value;
+  }
+
+ private:
+  Register receiver_;
+  Handle<String> name_;
+  bool is_contextual_;
+  bool is_dont_delete_;
+};
+
+
+void DeferredReferenceGetNamedValue::Generate() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceGetKeyedValue(Register key, Register receiver)
+      : key_(key), receiver_(receiver) {
+    set_comment("[ DeferredReferenceGetKeyedValue");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register key_;
+  Register receiver_;
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetKeyedValue(Register value,
+                                 Register key,
+                                 Register receiver)
+      : value_(value), key_(key), receiver_(receiver) {
+    set_comment("[ DeferredReferenceSetKeyedValue");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register value_;
+  Register key_;
+  Register receiver_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredReferenceSetNamedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetNamedValue(Register value,
+                                 Register receiver,
+                                 Handle<String> name)
+      : value_(value), receiver_(receiver), name_(name) {
+    set_comment("[ DeferredReferenceSetNamedValue");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register value_;
+  Register receiver_;
+  Handle<String> name_;
+};
+
+
+void DeferredReferenceSetNamedValue::Generate() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitKeyedLoad() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitKeyedStore(StaticType* key_type,
+                                   WriteBarrierCharacter wb_info) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 #ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() { return true; }
+bool CodeGenerator::HasValidEntryRegisters() {
+  UNIMPLEMENTED_MIPS();
+  return false;
+}
 #endif
 
 
@@ -986,447 +1176,33 @@
 #define __ ACCESS_MASM(masm)
 
 // -----------------------------------------------------------------------------
-// Reference support
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
-  ASSERT(is_unloaded() || is_illegal());
-}
+// Reference support.
 
 
 Handle<String> Reference::GetName() {
-  ASSERT(type_ == NAMED);
-  Property* property = expression_->AsProperty();
-  if (property == NULL) {
-    // Global variable reference treated as a named property reference.
-    VariableProxy* proxy = expression_->AsVariableProxy();
-    ASSERT(proxy->AsVariable() != NULL);
-    ASSERT(proxy->AsVariable()->is_global());
-    return proxy->name();
-  } else {
-    Literal* raw_name = property->key()->AsLiteral();
-    ASSERT(raw_name != NULL);
-    return Handle<String>(String::cast(*raw_name->handle()));
-  }
+  UNIMPLEMENTED_MIPS();
+  return Handle<String>();
+}
+
+
+void Reference::DupIfPersist() {
+  UNIMPLEMENTED_MIPS();
 }
 
 
 void Reference::GetValue() {
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  ASSERT(!cgen_->has_cc());
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      UNIMPLEMENTED_MIPS();
-      break;
-    }
-
-    case NAMED: {
-      UNIMPLEMENTED_MIPS();
-      break;
-    }
-
-    case KEYED: {
-      UNIMPLEMENTED_MIPS();
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void Reference::SetValue(InitState init_state) {
-  ASSERT(!is_illegal());
-  ASSERT(!cgen_->has_cc());
-  MacroAssembler* masm = cgen_->masm();
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
-      cgen_->StoreToSlot(slot, init_state);
-      cgen_->UnloadReference(this);
-      break;
-    }
-
-    case NAMED: {
-      UNIMPLEMENTED_MIPS();
-      break;
-    }
-
-    case KEYED: {
-      UNIMPLEMENTED_MIPS();
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
-// positive or negative to indicate the result of the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
-  __ break_(0x765);
 }
 
 
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
   UNIMPLEMENTED_MIPS();
-  return Handle<Code>::null();
 }
 
 
-void StackCheckStub::Generate(MacroAssembler* masm) {
+const char* GenericBinaryOpStub::GetName() {
   UNIMPLEMENTED_MIPS();
-  __ break_(0x790);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x808);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
-                                          UncatchableExceptionType type) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x815);
-}
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
-                              Label* throw_normal_exception,
-                              Label* throw_termination_exception,
-                              Label* throw_out_of_memory_exception,
-                              bool do_gc,
-                              bool always_allocate) {
-  // s0: number of arguments including receiver (C callee-saved)
-  // s1: pointer to the first argument          (C callee-saved)
-  // s2: pointer to builtin function            (C callee-saved)
-
-  if (do_gc) {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  ExternalReference scope_depth =
-      ExternalReference::heap_always_allocate_scope_depth();
-  if (always_allocate) {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  // Call C built-in.
-  // a0 = argc, a1 = argv
-  __ mov(a0, s0);
-  __ mov(a1, s1);
-
-  __ CallBuiltin(s2);
-
-  if (always_allocate) {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  // Check for failure result.
-  Label failure_returned;
-  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
-  __ addiu(a2, v0, 1);
-  __ andi(t0, a2, kFailureTagMask);
-  __ Branch(eq, &failure_returned, t0, Operand(zero_reg));
-
-  // Exit C frame and return.
-  // v0:v1: result
-  // sp: stack pointer
-  // fp: frame pointer
-  __ LeaveExitFrame(mode_);
-
-  // Check if we should retry or throw exception.
-  Label retry;
-  __ bind(&failure_returned);
-  ASSERT(Failure::RETRY_AFTER_GC == 0);
-  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
-  __ Branch(eq, &retry, t0, Operand(zero_reg));
-
-  // Special handling of out of memory exceptions.
-  Failure* out_of_memory = Failure::OutOfMemoryException();
-  __ Branch(eq, throw_out_of_memory_exception,
-            v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-
-  // Retrieve the pending exception and clear the variable.
-  __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
-  __ lw(a3, MemOperand(t0));
-  __ LoadExternalReference(t0,
-      ExternalReference(Top::k_pending_exception_address));
-  __ lw(v0, MemOperand(t0));
-  __ sw(a3, MemOperand(t0));
-
-  // Special handling of termination exceptions which are uncatchable
-  // by javascript code.
-  __ Branch(eq, throw_termination_exception,
-            v0, Operand(Factory::termination_exception()));
-
-  // Handle normal exception.
-  __ b(throw_normal_exception);
-  __ nop();   // Branch delay slot nop.
-
-  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
-}
-
-void CEntryStub::Generate(MacroAssembler* masm) {
-  // Called from JavaScript; parameters are on stack as if calling JS function
-  // a0: number of arguments including receiver
-  // a1: pointer to builtin function
-  // fp: frame pointer    (restored after C call)
-  // sp: stack pointer    (restored as callee's sp after C call)
-  // cp: current context  (C callee-saved)
-
-  // NOTE: Invocations of builtins may return failure objects
-  // instead of a proper result. The builtin entry handles
-  // this by performing a garbage collection and retrying the
-  // builtin once.
-
-  // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(mode_, s0, s1, s2);
-
-  // s0: number of arguments (C callee-saved)
-  // s1: pointer to first argument (C callee-saved)
-  // s2: pointer to builtin function (C callee-saved)
-
-  Label throw_normal_exception;
-  Label throw_termination_exception;
-  Label throw_out_of_memory_exception;
-
-  // Call into the runtime system.
-  GenerateCore(masm,
-               &throw_normal_exception,
-               &throw_termination_exception,
-               &throw_out_of_memory_exception,
-               false,
-               false);
-
-  // Do space-specific GC and retry runtime call.
-  GenerateCore(masm,
-               &throw_normal_exception,
-               &throw_termination_exception,
-               &throw_out_of_memory_exception,
-               true,
-               false);
-
-  // Do full GC and retry runtime call one final time.
-  Failure* failure = Failure::InternalError();
-  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
-  GenerateCore(masm,
-               &throw_normal_exception,
-               &throw_termination_exception,
-               &throw_out_of_memory_exception,
-               true,
-               true);
-
-  __ bind(&throw_out_of_memory_exception);
-  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
-  __ bind(&throw_termination_exception);
-  GenerateThrowUncatchable(masm, TERMINATION);
-
-  __ bind(&throw_normal_exception);
-  GenerateThrowTOS(masm);
-}
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, exit;
-
-  // Registers:
-  // a0: entry address
-  // a1: function
-  // a2: reveiver
-  // a3: argc
-  //
-  // Stack:
-  // 4 args slots
-  // args
-
-  // Save callee saved registers on the stack.
-  __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
-
-  // We build an EntryFrame.
-  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
-  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
-  __ li(t2, Operand(Smi::FromInt(marker)));
-  __ li(t1, Operand(Smi::FromInt(marker)));
-  __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
-  __ lw(t0, MemOperand(t0));
-  __ MultiPush(t0.bit() | t1.bit() | t2.bit() | t3.bit());
-
-  // Setup frame pointer for the frame to be pushed.
-  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
-
-  // Load argv in s0 register.
-  __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize +
-                           StandardFrameConstants::kCArgsSlotsSize));
-
-  // Registers:
-  // a0: entry_address
-  // a1: function
-  // a2: reveiver_pointer
-  // a3: argc
-  // s0: argv
-  //
-  // Stack:
-  // caller fp          |
-  // function slot      | entry frame
-  // context slot       |
-  // bad fp (0xff...f)  |
-  // callee saved registers + ra
-  // 4 args slots
-  // args
-
-  // Call a faked try-block that does the invoke.
-  __ bal(&invoke);
-  __ nop();   // Branch delay slot nop.
-
-  // Caught exception: Store result (exception) in the pending
-  // exception field in the JSEnv and return a failure sentinel.
-  // Coming in here the fp will be invalid because the PushTryHandler below
-  // sets it to 0 to signal the existence of the JSEntry frame.
-  __ LoadExternalReference(t0,
-      ExternalReference(Top::k_pending_exception_address));
-  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
-  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
-  __ b(&exit);
-  __ nop();   // Branch delay slot nop.
-
-  // Invoke: Link this frame into the handler chain.
-  __ bind(&invoke);
-  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-  // If an exception not caught by another handler occurs, this handler
-  // returns control to the code after the bal(&invoke) above, which
-  // restores all kCalleeSaved registers (including cp and fp) to their
-  // saved values before returning a failure to C.
-
-  // Clear any pending exceptions.
-  __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
-  __ lw(t1, MemOperand(t0));
-  __ LoadExternalReference(t0,
-      ExternalReference(Top::k_pending_exception_address));
-  __ sw(t1, MemOperand(t0));
-
-  // Invoke the function by calling through JS entry trampoline builtin.
-  // Notice that we cannot store a reference to the trampoline code directly in
-  // this stub, because runtime stubs are not traversed when doing GC.
-
-  // Registers:
-  // a0: entry_address
-  // a1: function
-  // a2: reveiver_pointer
-  // a3: argc
-  // s0: argv
-  //
-  // Stack:
-  // handler frame
-  // entry frame
-  // callee saved registers + ra
-  // 4 args slots
-  // args
-
-  if (is_construct) {
-    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
-    __ LoadExternalReference(t0, construct_entry);
-  } else {
-    ExternalReference entry(Builtins::JSEntryTrampoline);
-    __ LoadExternalReference(t0, entry);
-  }
-  __ lw(t9, MemOperand(t0));  // deref address
-
-  // Call JSEntryTrampoline.
-  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
-  __ CallBuiltin(t9);
-
-  // Unlink this frame from the handler chain. When reading the
-  // address of the next handler, there is no need to use the address
-  // displacement since the current stack pointer (sp) points directly
-  // to the stack handler.
-  __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
-  __ LoadExternalReference(t0, ExternalReference(Top::k_handler_address));
-  __ sw(t1, MemOperand(t0));
-
-  // This restores sp to its position before PushTryHandler.
-  __ addiu(sp, sp, StackHandlerConstants::kSize);
-
-  __ bind(&exit);  // v0 holds result
-  // Restore the top frame descriptors from the stack.
-  __ Pop(t1);
-  __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
-  __ sw(t1, MemOperand(t0));
-
-  // Reset the stack to the callee saved registers.
-  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
-
-  // Restore callee saved registers from the stack.
-  __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
-  // Return.
-  __ Jump(ra);
-}
-
-
-// This stub performs an instanceof, calling the builtin function if
-// necessary. Uses a1 for the object, a0 for the function that it may
-// be an instance of (these are fetched from the stack).
-void InstanceofStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x845);
-}
-
-
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x851);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x857);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x863);
-}
-
-
-const char* CompareStub::GetName() {
-  UNIMPLEMENTED_MIPS();
-  return NULL;  // UNIMPLEMENTED RETURN
-}
-
-
-int CompareStub::MinorKey() {
-  // Encode the two parameters in a unique 16 bit value.
-  ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
-  return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+  return name_;
 }
 
 
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 66f891b..0a2cd45 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -29,17 +29,37 @@
 #ifndef V8_MIPS_CODEGEN_MIPS_H_
 #define V8_MIPS_CODEGEN_MIPS_H_
 
+
+#include "ast.h"
+#include "code-stubs-mips.h"
+#include "ic-inl.h"
+
 namespace v8 {
 namespace internal {
 
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+// Use floating-point coprocessor instructions. This flag is raised when
+// -mhard-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = false;
+#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+// Not using floating-point coprocessor instructions. This flag is raised when
+// -msoft-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = true;
+#else
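+// No floating-point ABI flag was supplied by the compiler; conservatively
+// assume the soft-float calling convention.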
+static const bool IsMipsSoftFloatABI = true;
+#endif
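+// Illustrative sketch (not part of this port): the flag lets shared code
+// choose a calling convention at runtime, along the lines of
+//   if (IsMipsSoftFloatABI) { /* FP arguments travel in GPRs a0..a3. */ }
+//   else                    { /* FP arguments travel in f12/f14. */ }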
+
 // Forward declarations
 class CompilationInfo;
 class DeferredCode;
+class JumpTarget;
 class RegisterAllocator;
 class RegisterFile;
 
 enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
+enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
 
 
 // -----------------------------------------------------------------------------
@@ -101,7 +121,12 @@
   // on the expression stack.  The value is stored in the location specified
   // by the reference, and is left on top of the stack, after the reference
   // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state);
+  void SetValue(InitState init_state, WriteBarrierCharacter wb);
+
+  // This is in preparation for something that uses the reference on the stack.
+  // If the reference is needed afterwards, dup it now; otherwise mark it
+  // as used.
+  inline void DupIfPersist();
 
  private:
   CodeGenerator* cgen_;
@@ -126,31 +151,24 @@
   // leaves the code generator with a NULL state.
   explicit CodeGenState(CodeGenerator* owner);
 
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own typeof state and pair of branch
-  // labels.
-  CodeGenState(CodeGenerator* owner,
-               JumpTarget* true_target,
-               JumpTarget* false_target);
 
   // Destroy a code generator state and restore the owning code generator's
   // previous state.
-  ~CodeGenState();
+  virtual ~CodeGenState();
 
-  TypeofState typeof_state() const { return typeof_state_; }
-  JumpTarget* true_target() const { return true_target_; }
-  JumpTarget* false_target() const { return false_target_; }
+  virtual JumpTarget* true_target() const { return NULL; }
+  virtual JumpTarget* false_target() const { return NULL; }
+
+ protected:
+  inline CodeGenerator* owner() { return owner_; }
+  inline CodeGenState* previous() const { return previous_; }
 
  private:
   // The owning code generator.
   CodeGenerator* owner_;
 
-  // A flag indicating whether we are compiling the immediate subexpression
-  // of a typeof expression.
-  TypeofState typeof_state_;
 
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
 
   // The previous state of the owning code generator, restored when
   // this state is destroyed.
@@ -158,6 +176,50 @@
 };
 
 
+class ConditionCodeGenState : public CodeGenState {
+ public:
+  // Create a code generator state based on a code generator's current
+  // state.  The new state has its own pair of branch labels.
+  ConditionCodeGenState(CodeGenerator* owner,
+                        JumpTarget* true_target,
+                        JumpTarget* false_target);
+
+  virtual JumpTarget* true_target() const { return true_target_; }
+  virtual JumpTarget* false_target() const { return false_target_; }
+
+ private:
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+};
+
+
+class TypeInfoCodeGenState : public CodeGenState {
+ public:
+  TypeInfoCodeGenState(CodeGenerator* owner,
+                       Slot* slot_number,
+                       TypeInfo info);
+  virtual ~TypeInfoCodeGenState();
+
+  virtual JumpTarget* true_target() const { return previous()->true_target(); }
+  virtual JumpTarget* false_target() const {
+    return previous()->false_target();
+  }
+
+ private:
+  Slot* slot_;
+  TypeInfo old_type_info_;
+};
+
+
+// -------------------------------------------------------------------------
+// Arguments allocation mode
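+//
+// A sketch of the intended meanings: NO_ARGUMENTS_ALLOCATION means the
+// function never needs an arguments object, EAGER_ARGUMENTS_ALLOCATION
+// allocates it on function entry, and LAZY_ARGUMENTS_ALLOCATION defers the
+// allocation until the object is first used (see ArgumentsMode() and
+// StoreArgumentsObject() below).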
+
+enum ArgumentsAllocationMode {
+  NO_ARGUMENTS_ALLOCATION,
+  EAGER_ARGUMENTS_ALLOCATION,
+  LAZY_ARGUMENTS_ALLOCATION
+};
+
 
 // -----------------------------------------------------------------------------
 // CodeGenerator
@@ -173,9 +235,7 @@
     SECONDARY
   };
 
-  // Takes a function literal, generates code for it. This function should only
-  // be called by compiler.cc.
-  static Handle<Code> MakeCode(CompilationInfo* info);
+  static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
   static void MakeCodePrologue(CompilationInfo* info);
@@ -185,6 +245,9 @@
                                        Code::Flags flags,
                                        CompilationInfo* info);
 
+  // Print the code after compiling it.
+  static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -194,7 +257,9 @@
                               bool is_toplevel,
                               Handle<Script> script);
 
-  static void RecordPositions(MacroAssembler* masm, int pos);
+  static bool RecordPositions(MacroAssembler* masm,
+                              int pos,
+                              bool right_here = false);
 
   // Accessors
   MacroAssembler* masm() { return masm_; }
@@ -216,73 +281,105 @@
   CodeGenState* state() { return state_; }
   void set_state(CodeGenState* state) { state_ = state; }
 
+  TypeInfo type_info(Slot* slot) {
+    int index = NumberOfSlot(slot);
+    if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
+    return (*type_info_)[index];
+  }
+
+  TypeInfo set_type_info(Slot* slot, TypeInfo info) {
+    int index = NumberOfSlot(slot);
+    ASSERT(index >= kInvalidSlotNumber);
+    if (index != kInvalidSlotNumber) {
+      TypeInfo previous_value = (*type_info_)[index];
+      (*type_info_)[index] = info;
+      return previous_value;
+    }
+    return TypeInfo::Unknown();
+  }
+
   void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
 
-  static const int kUnknownIntValue = -1;
-
-  // Number of instructions used for the JS return sequence. The constant is
-  // used by the debugger to patch the JS return sequence.
-  static const int kJSReturnSequenceLength = 7;
-
-  // If the name is an inline runtime function call return the number of
-  // expected arguments. Otherwise return -1.
-  static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+  // Constants related to patching of inlined load/store.
+  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+    // This is in correlation with the padding in MacroAssembler::Abort.
+    return FLAG_debug_code ? 45 : 20;
+  }
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 9;
+  static int GetInlinedNamedStoreInstructionsAfterPatch() {
+    ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+    // Magic number 5: instruction count after patched map load:
+    //  li: 2 (lui & ori), Branch: 2 (bne & nop), sw: 1.
+    return Isolate::Current()->inlined_write_barrier_size() + 5;
+  }
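+  // Worked example (hypothetical size): with an inlined write barrier of 7
+  // instructions, the patcher must skip 7 + 5 = 12 instructions after the
+  // patched map load.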
 
  private:
+  // Type of a member function that generates inline code for a native function.
+  typedef void (CodeGenerator::*InlineFunctionGenerator)
+      (ZoneList<Expression*>*);
+
+  static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
+
   // Construction/Destruction.
   explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors.
   inline bool is_eval();
   inline Scope* scope();
+  inline bool is_strict_mode();
+  inline StrictModeFlag strict_mode_flag();
 
   // Generating deferred code.
   void ProcessDeferred();
 
+  static const int kInvalidSlotNumber = -1;
+
+  int NumberOfSlot(Slot* slot);
+
   // State
   bool has_cc() const { return cc_reg_ != cc_always; }
-  TypeofState typeof_state() const { return state_->typeof_state(); }
+
   JumpTarget* true_target() const { return state_->true_target(); }
   JumpTarget* false_target() const { return state_->false_target(); }
 
-  // We don't track loop nesting level on mips yet.
-  int loop_nesting() const { return 0; }
+  // Track loop nesting level.
+  int loop_nesting() const { return loop_nesting_; }
+  void IncrementLoopNesting() { loop_nesting_++; }
+  void DecrementLoopNesting() { loop_nesting_--; }
 
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
 
+  virtual void VisitSlot(Slot* node);
 #define DEF_VISIT(type) \
-  void Visit##type(type* node);
+  virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
-  // Visit a statement and then spill the virtual frame if control flow can
-  // reach the end of the statement (ie, it does not exit via break,
-  // continue, return, or throw).  This function is used temporarily while
-  // the code generator is being transformed.
-  inline void VisitAndSpill(Statement* statement);
-
-  // Visit a list of statements and then spill the virtual frame if control
-  // flow can reach the end of the list.
-  inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
   // Main code generation function
   void Generate(CompilationInfo* info);
 
+  // Generate the return sequence code.  Should be called no more than
+  // once per compiled function, immediately after binding the return
+  // target (which cannot be done more than once).  The return value should
+  // be in v0.
+  void GenerateReturnSequence();
+
+  // Returns the arguments allocation mode.
+  ArgumentsAllocationMode ArgumentsMode();
+
+  // Store the arguments object and allocate it if necessary.
+  void StoreArgumentsObject(bool initial);
+
   // The following are used by class Reference.
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);
 
-  MemOperand ContextOperand(Register context, int index) const {
-    return MemOperand(context, Context::SlotOffset(index));
-  }
-
   MemOperand SlotOperand(Slot* slot, Register tmp);
 
-  // Expressions
-  MemOperand GlobalObject() const {
-    return ContextOperand(cp, Context::GLOBAL_INDEX);
-  }
+  MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
+                                               Register tmp,
+                                               Register tmp2,
+                                               JumpTarget* slow);
 
   void LoadCondition(Expression* x,
                      JumpTarget* true_target,
@@ -290,35 +387,113 @@
                      bool force_cc);
   void Load(Expression* x);
   void LoadGlobal();
+  void LoadGlobalReceiver(Register scratch);
 
-  // Generate code to push the value of an expression on top of the frame
-  // and then spill the frame fully to memory.  This function is used
-  // temporarily while the code generator is being transformed.
-  inline void LoadAndSpill(Expression* expression);
+
+  // Special code for typeof expressions: Unfortunately, we must
+  // be careful when loading the expression in 'typeof'
+  // expressions. We are not allowed to throw reference errors for
+  // non-existing properties of the global object, so we must make it
+  // look like an explicit property access, instead of an access
+  // through the context chain.
+  void LoadTypeofExpression(Expression* x);
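+  // For example, 'typeof some_undeclared_name' must evaluate to "undefined"
+  // rather than throw a ReferenceError, as a plain variable load would.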
+
+  // Store a keyed property. Key and receiver are on the stack and the value is
+  // in a0. Result is returned in v0.
+  void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
 
   // Read a value from a slot and leave it on top of the expression stack.
   void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
+                                         TypeofState typeof_state,
+                                         JumpTarget* slow);
+  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
+
+  // Support for loading from local/global variables and arguments
+  // whose location is known unless they are shadowed by
+  // eval-introduced bindings. Generates no code for unsupported slot
+  // types and therefore expects to fall through to the slow jump target.
+  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
+                                       TypeofState typeof_state,
+                                       JumpTarget* slow,
+                                       JumpTarget* done);
+
   // Store the value on top of the stack to a slot.
   void StoreToSlot(Slot* slot, InitState init_state);
 
-  struct InlineRuntimeLUT {
-    void (CodeGenerator::*method)(ZoneList<Expression*>*);
-    const char* name;
-    int nargs;
-  };
+  // Support for compiling assignment expressions.
+  void EmitSlotAssignment(Assignment* node);
+  void EmitNamedPropertyAssignment(Assignment* node);
+  void EmitKeyedPropertyAssignment(Assignment* node);
 
-  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+  // Load a named property, returning it in v0. The receiver is passed on the
+  // stack, and remains there.
+  void EmitNamedLoad(Handle<String> name, bool is_contextual);
+
+  // Store to a named property. If the store is contextual, value is passed on
+  // the frame and consumed. Otherwise, receiver and value are passed on the
+  // frame and consumed. The result is returned in v0.
+  void EmitNamedStore(Handle<String> name, bool is_contextual);
+
+  // Load a keyed property, leaving it in v0. The receiver and key are
+  // passed on the stack, and remain there.
+  void EmitKeyedLoad();
+
+  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
+
+  // Generate code that computes a shortcutting logical operation.
+  void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
+  void GenericBinaryOperation(Token::Value op,
+                              OverwriteMode overwrite_mode,
+                              GenerateInlineSmi inline_smi,
+                              int known_rhs =
+                                GenericBinaryOpStub::kUnknownIntValue);
+
+  void VirtualFrameBinaryOperation(Token::Value op,
+                                   OverwriteMode overwrite_mode,
+                                   int known_rhs =
+                                      GenericBinaryOpStub::kUnknownIntValue);
+
+  void SmiOperation(Token::Value op,
+                    Handle<Object> value,
+                    bool reversed,
+                    OverwriteMode mode);
+
+  void Comparison(Condition cc,
+                  Expression* left,
+                  Expression* right,
+                  bool strict = false);
+
+  void CallWithArguments(ZoneList<Expression*>* arguments,
+                         CallFunctionFlags flags,
+                         int position);
+
+  // An optimized implementation of expressions of the form
+  // x.apply(y, arguments).  We call x the applicand and y the receiver.
+  // The optimization avoids allocating an arguments object if possible.
+  void CallApplyLazy(Expression* applicand,
+                     Expression* receiver,
+                     VariableProxy* arguments,
+                     int position);
+
+  // Control flow
+  void Branch(bool if_true, JumpTarget* target);
+  void CheckStack();
+
   bool CheckForInlineRuntimeCall(CallRuntime* node);
 
   static Handle<Code> ComputeLazyCompile(int argc);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
-
   // Declare global variables and functions in the given array of
   // name/value pairs.
   void DeclareGlobals(Handle<FixedArray> pairs);
 
+  // Instantiate the function based on the shared function info.
+  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
+                           bool pretenure);
+
   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
   void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
@@ -338,10 +513,13 @@
   void GenerateSetValueOf(ZoneList<Expression*>* args);
 
   // Fast support for charCodeAt(n).
-  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
 
   // Fast support for String.fromCharCode(ch).
-  void GenerateCharFromCode(ZoneList<Expression*>* args);
+  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
+
+  // Fast support for string.charAt(n) and string[n].
+  void GenerateStringCharAt(ZoneList<Expression*>* args);
 
   // Fast support for object equality testing.
   void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -358,14 +536,38 @@
   void GenerateStringAdd(ZoneList<Expression*>* args);
   void GenerateSubString(ZoneList<Expression*>* args);
   void GenerateStringCompare(ZoneList<Expression*>* args);
+  void GenerateIsStringWrapperSafeForDefaultValueOf(
+      ZoneList<Expression*>* args);
+
+  // Support for direct calls from JavaScript to native RegExp code.
   void GenerateRegExpExec(ZoneList<Expression*>* args);
+
+  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+
+  // Support for fast native caches.
+  void GenerateGetFromCache(ZoneList<Expression*>* args);
+
+  // Fast support for number to string.
   void GenerateNumberToString(ZoneList<Expression*>* args);
 
+  // Fast swapping of elements.
+  void GenerateSwapElements(ZoneList<Expression*>* args);
+
+  // Fast call for custom callbacks.
+  void GenerateCallFunction(ZoneList<Expression*>* args);
+
   // Fast call to math functions.
   void GenerateMathPow(ZoneList<Expression*>* args);
   void GenerateMathSin(ZoneList<Expression*>* args);
   void GenerateMathCos(ZoneList<Expression*>* args);
   void GenerateMathSqrt(ZoneList<Expression*>* args);
+  void GenerateMathLog(ZoneList<Expression*>* args);
+
+  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+
+  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
 
   // Simple condition analysis.
   enum ConditionAnalysis {
@@ -389,9 +591,6 @@
   bool HasValidEntryRegisters();
 #endif
 
-  bool is_eval_;  // Tells whether code is generated for eval.
-
-  Handle<Script> script_;
   List<DeferredCode*> deferred_;
 
   // Assembler
@@ -404,7 +603,9 @@
   RegisterAllocator* allocator_;
   Condition cc_reg_;
   CodeGenState* state_;
+  int loop_nesting_;
 
+  Vector<TypeInfo>* type_info_;
   // Jump targets
   BreakTarget function_return_;
 
@@ -413,14 +614,15 @@
   // to some unlinking code).
   bool function_return_is_shadowed_;
 
-  static InlineRuntimeLUT kInlineRuntimeLUT[];
-
   friend class VirtualFrame;
+  friend class Isolate;
   friend class JumpTarget;
   friend class Reference;
   friend class FastCodeGenerator;
   friend class FullCodeGenerator;
   friend class FullCodeGenSyntaxChecker;
+  friend class InlineRuntimeFunctionsTable;
+  friend class LCodeGen;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index 49502bd..16e49c9 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -31,10 +31,8 @@
 
 #include "constants-mips.h"
 
-namespace assembler {
-namespace mips {
-
-namespace v8i = v8::internal;
+namespace v8 {
+namespace internal {
 
 
 // -----------------------------------------------------------------------------
@@ -102,20 +100,20 @@
 }
 
 
-const char* FPURegister::names_[kNumFPURegister] = {
+const char* FPURegisters::names_[kNumFPURegisters] = {
   "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
   "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
   "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
 };
 
 // List of alias names which can be used when referring to MIPS registers.
-const FPURegister::RegisterAlias FPURegister::aliases_[] = {
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
   {kInvalidRegister, NULL}
 };
 
-const char* FPURegister::Name(int creg) {
+const char* FPURegisters::Name(int creg) {
   const char* result;
-  if ((0 <= creg) && (creg < kNumFPURegister)) {
+  if ((0 <= creg) && (creg < kNumFPURegisters)) {
     result = names_[creg];
   } else {
     result = "nocreg";
@@ -124,9 +122,9 @@
 }
 
 
-int FPURegister::Number(const char* name) {
+int FPURegisters::Number(const char* name) {
   // Look through the canonical names.
-  for (int i = 0; i < kNumSimuRegisters; i++) {
+  for (int i = 0; i < kNumFPURegisters; i++) {
     if (strcmp(names_[i], name) == 0) {
       return i;
     }
@@ -149,8 +147,8 @@
 // -----------------------------------------------------------------------------
 // Instruction
 
-bool Instruction::IsForbiddenInBranchDelay() {
-  int op = OpcodeFieldRaw();
+bool Instruction::IsForbiddenInBranchDelay() const {
+  const int op = OpcodeFieldRaw();
   switch (op) {
     case J:
     case JAL:
@@ -189,13 +187,18 @@
 }
 
 
-bool Instruction::IsLinkingInstruction() {
-  int op = OpcodeFieldRaw();
+bool Instruction::IsLinkingInstruction() const {
+  const int op = OpcodeFieldRaw();
   switch (op) {
     case JAL:
-    case BGEZAL:
-    case BLTZAL:
-      return true;
+    case REGIMM:
+      switch (RtFieldRaw()) {
+        case BGEZAL:
+        case BLTZAL:
+          return true;
+        default:
+          return false;
+      };
     case SPECIAL:
       switch (FunctionFieldRaw()) {
         case JALR:
@@ -209,7 +212,7 @@
 }
 
 
-bool Instruction::IsTrap() {
+bool Instruction::IsTrap() const {
   if (OpcodeFieldRaw() != SPECIAL) {
     return false;
   } else {
@@ -264,6 +267,9 @@
         case TLTU:
         case TEQ:
         case TNE:
+        case MOVZ:
+        case MOVN:
+        case MOVCI:
           return kRegisterType;
         default:
           UNREACHABLE();
@@ -272,13 +278,23 @@
     case SPECIAL2:
       switch (FunctionFieldRaw()) {
         case MUL:
+        case CLZ:
+          return kRegisterType;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL3:
+      switch (FunctionFieldRaw()) {
+        case INS:
+        case EXT:
           return kRegisterType;
         default:
           UNREACHABLE();
       };
       break;
     case COP1:    // Coprocessor instructions
-      switch (FunctionFieldRaw()) {
+      switch (RsFieldRawNoAssert()) {
         case BC1:   // branch on coprocessor condition
           return kImmediateType;
         default:
@@ -304,10 +320,17 @@
     case BLEZL:
     case BGTZL:
     case LB:
+    case LH:
+    case LWL:
     case LW:
     case LBU:
+    case LHU:
+    case LWR:
     case SB:
+    case SH:
+    case SWL:
     case SW:
+    case SWR:
     case LWC1:
     case LDC1:
     case SWC1:
@@ -323,6 +346,7 @@
   return kUnsupported;
 }
 
-} }   // namespace assembler::mips
+
+} }   // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index d0fdf88..b20e9a2 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -28,15 +28,25 @@
 #ifndef  V8_MIPS_CONSTANTS_H_
 #define  V8_MIPS_CONSTANTS_H_
 
-#include "checks.h"
-
 // UNIMPLEMENTED_ macro for MIPS.
+#ifdef DEBUG
 #define UNIMPLEMENTED_MIPS()                                                  \
   v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n",    \
                        __FILE__, __LINE__, __func__)
+#else
+#define UNIMPLEMENTED_MIPS()
+#endif
+
 #define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
 
 
+#ifdef _MIPS_ARCH_MIPS32R2
+  #define mips32r2 1
+#else
+  #define mips32r2 0
+#endif
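+// Illustrative sketch: this makes it easy to guard MIPS32R2-only encodings
+// (e.g. the SPECIAL3 INS/EXT instructions added below) with a plain
+// conditional:
+//   if (mips32r2) { /* Emit ins/ext. */ } else { /* Shift/mask fallback. */ }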
+
+
 // Defines constants and accessor classes to assemble, disassemble and
 // simulate MIPS32 instructions.
 //
@@ -44,8 +54,8 @@
 //      Volume II: The MIPS32 Instruction Set
 // Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
 
-namespace assembler {
-namespace mips {
+namespace v8 {
+namespace internal {
 
 // -----------------------------------------------------------------------------
 // Registers and FPURegisters.
@@ -61,9 +71,18 @@
 static const int kPCRegister = 34;
 
 // Number of coprocessor registers.
-static const int kNumFPURegister = 32;
+static const int kNumFPURegisters = 32;
 static const int kInvalidFPURegister = -1;
 
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+static const int kFCSRRegister = 31;
+static const int kInvalidFPUControlRegister = -1;
+static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+
+// FCSR constants.
+static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
+static const uint32_t kFCSRFlagShift = 2;
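+// Illustrative sketch: the accumulated exception flags can be read out of a
+// raw FCSR value with a shift-and-mask, e.g.
+//   uint32_t flags = (fcsr >> kFCSRFlagShift) & kFCSRFlagMask;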
+
 // Helper functions for converting between register numbers and names.
 class Registers {
  public:
@@ -88,7 +107,7 @@
 };
 
 // Helper functions for converting between register numbers and names.
-class FPURegister {
+class FPURegisters {
  public:
   // Return the name of the register.
   static const char* Name(int reg);
@@ -103,7 +122,7 @@
 
  private:
 
-  static const char* names_[kNumFPURegister];
+  static const char* names_[kNumFPURegisters];
   static const RegisterAlias aliases_[];
 };
 
@@ -136,6 +155,7 @@
 static const int kSaBits        = 5;
 static const int kFunctionShift = 0;
 static const int kFunctionBits  = 6;
+static const int kLuiShift      = 16;
 
 static const int kImm16Shift = 0;
 static const int kImm16Bits  = 16;
@@ -146,6 +166,14 @@
 static const int kFsBits        = 5;
 static const int kFtShift       = 16;
 static const int kFtBits        = 5;
+static const int kFdShift       = 6;
+static const int kFdBits        = 5;
+static const int kFCccShift     = 8;
+static const int kFCccBits      = 3;
+static const int kFBccShift     = 18;
+static const int kFBccBits      = 3;
+static const int kFBtrueShift   = 16;
+static const int kFBtrueBits    = 1;
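Each shift/width pair feeds the generic Bits(hi, lo) accessor further down;
extracting, say, the fd field reduces to a shift and mask:

    int fd = (instr_bits >> kFdShift) & ((1 << kFdBits) - 1);  // Bits [10:6].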
 
 // ----- Miscellaneous useful masks.
 // Instruction bit masks.
@@ -159,9 +187,9 @@
 static const int  kFunctionFieldMask =
     ((1 << kFunctionBits) - 1) << kFunctionShift;
 // Misc masks.
-static const int  HIMask        =   0xffff << 16;
-static const int  LOMask        =   0xffff;
-static const int  signMask      =   0x80000000;
+static const int  kHiMask       =   0xffff << 16;
+static const int  kLoMask       =   0xffff;
+static const int  kSignMask     =   0x80000000;
 
 
 // ----- MIPS Opcodes and Function Fields.
@@ -194,12 +222,20 @@
   BGTZL     =   ((2 << 3) + 7) << kOpcodeShift,
 
   SPECIAL2  =   ((3 << 3) + 4) << kOpcodeShift,
+  SPECIAL3  =   ((3 << 3) + 7) << kOpcodeShift,
 
   LB        =   ((4 << 3) + 0) << kOpcodeShift,
+  LH        =   ((4 << 3) + 1) << kOpcodeShift,
+  LWL       =   ((4 << 3) + 2) << kOpcodeShift,
   LW        =   ((4 << 3) + 3) << kOpcodeShift,
   LBU       =   ((4 << 3) + 4) << kOpcodeShift,
+  LHU       =   ((4 << 3) + 5) << kOpcodeShift,
+  LWR       =   ((4 << 3) + 6) << kOpcodeShift,
   SB        =   ((5 << 3) + 0) << kOpcodeShift,
+  SH        =   ((5 << 3) + 1) << kOpcodeShift,
+  SWL       =   ((5 << 3) + 2) << kOpcodeShift,
   SW        =   ((5 << 3) + 3) << kOpcodeShift,
+  SWR       =   ((5 << 3) + 6) << kOpcodeShift,
 
   LWC1      =   ((6 << 3) + 1) << kOpcodeShift,
   LDC1      =   ((6 << 3) + 5) << kOpcodeShift,
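A worked example of this encoding scheme: LH is ((4 << 3) + 1) << kOpcodeShift,
i.e. opcode 0x21 in the top six bits, so the concrete instruction
lh t0, 4(a0) (rs = a0 = 4, rt = t0 = 8) assembles to:

    0x84000000 | (4 << kRsShift) | (8 << kRtShift) | 4  ==  0x84880004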
@@ -216,9 +252,12 @@
   SLLV      =   ((0 << 3) + 4),
   SRLV      =   ((0 << 3) + 6),
   SRAV      =   ((0 << 3) + 7),
+  MOVCI     =   ((0 << 3) + 1),
 
   JR        =   ((1 << 3) + 0),
   JALR      =   ((1 << 3) + 1),
+  MOVZ      =   ((1 << 3) + 2),
+  MOVN      =   ((1 << 3) + 3),
   BREAK     =   ((1 << 3) + 5),
 
   MFHI      =   ((2 << 3) + 0),
@@ -250,6 +289,12 @@
 
   // SPECIAL2 Encoding of Function Field.
   MUL       =   ((0 << 3) + 2),
+  CLZ       =   ((4 << 3) + 0),
+  CLO       =   ((4 << 3) + 1),
+
+  // SPECIAL3 Encoding of Function Field.
+  EXT       =   ((0 << 3) + 0),
+  INS       =   ((0 << 3) + 4),
 
   // REGIMM  encoding of rt Field.
   BLTZ      =   ((0 << 3) + 0) << 16,
@@ -259,8 +304,10 @@
 
   // COP1 Encoding of rs Field.
   MFC1      =   ((0 << 3) + 0) << 21,
+  CFC1      =   ((0 << 3) + 2) << 21,
   MFHC1     =   ((0 << 3) + 3) << 21,
   MTC1      =   ((0 << 3) + 4) << 21,
+  CTC1      =   ((0 << 3) + 6) << 21,
   MTHC1     =   ((0 << 3) + 7) << 21,
   BC1       =   ((1 << 3) + 0) << 21,
   S         =   ((2 << 3) + 0) << 21,
@@ -269,14 +316,46 @@
   L         =   ((2 << 3) + 5) << 21,
   PS        =   ((2 << 3) + 6) << 21,
   // COP1 Encoding of Function Field When rs=S.
+  ROUND_L_S =   ((1 << 3) + 0),
+  TRUNC_L_S =   ((1 << 3) + 1),
+  CEIL_L_S  =   ((1 << 3) + 2),
+  FLOOR_L_S =   ((1 << 3) + 3),
+  ROUND_W_S =   ((1 << 3) + 4),
+  TRUNC_W_S =   ((1 << 3) + 5),
+  CEIL_W_S  =   ((1 << 3) + 6),
+  FLOOR_W_S =   ((1 << 3) + 7),
   CVT_D_S   =   ((4 << 3) + 1),
   CVT_W_S   =   ((4 << 3) + 4),
   CVT_L_S   =   ((4 << 3) + 5),
   CVT_PS_S  =   ((4 << 3) + 6),
   // COP1 Encoding of Function Field When rs=D.
+  ADD_D     =   ((0 << 3) + 0),
+  SUB_D     =   ((0 << 3) + 1),
+  MUL_D     =   ((0 << 3) + 2),
+  DIV_D     =   ((0 << 3) + 3),
+  SQRT_D    =   ((0 << 3) + 4),
+  ABS_D     =   ((0 << 3) + 5),
+  MOV_D     =   ((0 << 3) + 6),
+  NEG_D     =   ((0 << 3) + 7),
+  ROUND_L_D =   ((1 << 3) + 0),
+  TRUNC_L_D =   ((1 << 3) + 1),
+  CEIL_L_D  =   ((1 << 3) + 2),
+  FLOOR_L_D =   ((1 << 3) + 3),
+  ROUND_W_D =   ((1 << 3) + 4),
+  TRUNC_W_D =   ((1 << 3) + 5),
+  CEIL_W_D  =   ((1 << 3) + 6),
+  FLOOR_W_D =   ((1 << 3) + 7),
   CVT_S_D   =   ((4 << 3) + 0),
   CVT_W_D   =   ((4 << 3) + 4),
   CVT_L_D   =   ((4 << 3) + 5),
+  C_F_D     =   ((6 << 3) + 0),
+  C_UN_D    =   ((6 << 3) + 1),
+  C_EQ_D    =   ((6 << 3) + 2),
+  C_UEQ_D   =   ((6 << 3) + 3),
+  C_OLT_D   =   ((6 << 3) + 4),
+  C_ULT_D   =   ((6 << 3) + 5),
+  C_OLE_D   =   ((6 << 3) + 6),
+  C_ULE_D   =   ((6 << 3) + 7),
   // COP1 Encoding of Function Field When rs=W or L.
   CVT_S_W   =   ((4 << 3) + 0),
   CVT_D_W   =   ((4 << 3) + 1),
@@ -293,7 +372,7 @@
 // the 'U' prefix is used to specify unsigned comparisons.
 enum Condition {
   // Any value < 0 is considered no_condition.
-  no_condition  = -1,
+  kNoCondition  = -1,
 
   overflow      =  0,
   no_overflow   =  1,
@@ -321,12 +400,59 @@
   eq            = equal,
   not_zero      = not_equal,
   ne            = not_equal,
+  nz            = not_equal,
   sign          = negative,
   not_sign      = positive,
+  mi            = negative,
+  pl            = positive,
+  hi            = Ugreater,
+  ls            = Uless_equal,
+  ge            = greater_equal,
+  lt            = less,
+  gt            = greater,
+  le            = less_equal,
+  hs            = Ugreater_equal,
+  lo            = Uless,
+  al            = cc_always,
 
-  cc_default    = no_condition
+  cc_default    = kNoCondition
 };
 
+
+// Returns the equivalent of !cc.
+// Negation of the default kNoCondition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc) {
+  ASSERT(cc != cc_always);
+  return static_cast<Condition>(cc ^ 1);
+}
+
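The XOR trick works because the enum lays the conditions out in complementary
even/odd pairs, so flipping bit 0 flips the relation. Two quick checks under
that layout:

    ASSERT(NegateCondition(eq) == ne);               // equal <-> not_equal.
    ASSERT(NegateCondition(less) == greater_equal);  // signed < <-> >=.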
+
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case Uless:
+      return Ugreater;
+    case Ugreater:
+      return Uless;
+    case Ugreater_equal:
+      return Uless_equal;
+    case Uless_equal:
+      return Ugreater_equal;
+    case less:
+      return greater;
+    case greater:
+      return less;
+    case greater_equal:
+      return less_equal;
+    case less_equal:
+      return greater_equal;
+    default:
+      return cc;
+  };
+}
+
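Note the contrast with NegateCondition above: reversing rewrites "a cc b" as
"b cc' a" (operands swapped, so less becomes greater), while negating
complements the relation itself (less becomes greater_equal).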
+
 // ----- Coprocessor conditions.
 enum FPUCondition {
   F,    // False
@@ -340,6 +466,46 @@
 };
 
 
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the MIPS.  They are defined so that they can
+// appear in shared function signatures, but will be ignored in MIPS
+// implementations.
+enum Hint {
+  no_hint = 0
+};
+
+
+inline Hint NegateHint(Hint hint) {
+  return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-mips.cc, as they use named
+// registers and other constants.
+
+// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
+// operations as post-increment of sp.
+extern const Instr kPopInstruction;
+// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+extern const Instr kPushInstruction;
+// sw(r, MemOperand(sp, 0))
+extern const Instr kPushRegPattern;
+// lw(r, MemOperand(sp, 0))
+extern const Instr kPopRegPattern;
+extern const Instr kLwRegFpOffsetPattern;
+extern const Instr kSwRegFpOffsetPattern;
+extern const Instr kLwRegFpNegOffsetPattern;
+extern const Instr kSwRegFpNegOffsetPattern;
+// A mask for the Rt register for push, pop, lw, sw instructions.
+extern const Instr kRtMask;
+extern const Instr kLwSwInstrTypeMask;
+extern const Instr kLwSwInstrArgumentMask;
+extern const Instr kLwSwOffsetMask;
+
 // Break 0xfffff, reserved for redirected real time call.
 const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
 // A nop instruction. (Encoding of sll 0 0 0).
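Spelled out (SPECIAL is opcode 0 and BREAK is function field 0x0d per the enum
above, with call_rt_redirected taken to be the reserved code 0xfffff from the
comment):

    rtCallRedirInstr == 0 | 0x0d | (0xfffff << 6)  ==  0x03ffffcd
    nopInstr         == sll zero, zero, 0          ==  0x00000000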
@@ -348,10 +514,10 @@
 class Instruction {
  public:
   enum {
-    kInstructionSize = 4,
-    kInstructionSizeLog2 = 2,
+    kInstrSize = 4,
+    kInstrSizeLog2 = 2,
     // On MIPS PC cannot actually be directly accessed. We behave as if PC was
-    // always the value of the current instruction being exectued.
+    // always the value of the current instruction being executed.
     kPCReadOffset = 0
   };
 
@@ -388,45 +554,64 @@
 
 
   // Accessors for the different named fields used in the MIPS encoding.
-  inline Opcode OpcodeField() const {
+  inline Opcode OpcodeValue() const {
     return static_cast<Opcode>(
         Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
   }
 
-  inline int RsField() const {
+  inline int RsValue() const {
     ASSERT(InstructionType() == kRegisterType ||
            InstructionType() == kImmediateType);
     return Bits(kRsShift + kRsBits - 1, kRsShift);
   }
 
-  inline int RtField() const {
+  inline int RtValue() const {
     ASSERT(InstructionType() == kRegisterType ||
            InstructionType() == kImmediateType);
     return Bits(kRtShift + kRtBits - 1, kRtShift);
   }
 
-  inline int RdField() const {
+  inline int RdValue() const {
     ASSERT(InstructionType() == kRegisterType);
     return Bits(kRdShift + kRdBits - 1, kRdShift);
   }
 
-  inline int SaField() const {
+  inline int SaValue() const {
     ASSERT(InstructionType() == kRegisterType);
     return Bits(kSaShift + kSaBits - 1, kSaShift);
   }
 
-  inline int FunctionField() const {
+  inline int FunctionValue() const {
     ASSERT(InstructionType() == kRegisterType ||
            InstructionType() == kImmediateType);
     return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
   }
 
-  inline int FsField() const {
-    return Bits(kFsShift + kRsBits - 1, kFsShift);
+  inline int FdValue() const {
+    return Bits(kFdShift + kFdBits - 1, kFdShift);
   }
 
-  inline int FtField() const {
-    return Bits(kFtShift + kRsBits - 1, kFtShift);
+  inline int FsValue() const {
+    return Bits(kFsShift + kFsBits - 1, kFsShift);
+  }
+
+  inline int FtValue() const {
+    return Bits(kFtShift + kFtBits - 1, kFtShift);
+  }
+
+  // Float Compare condition code instruction bits.
+  inline int FCccValue() const {
+    return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+  }
+
+  // Float Branch condition code instruction bits.
+  inline int FBccValue() const {
+    return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
+  }
+
+  // Float Branch true/false instruction bit.
+  inline int FBtrueValue() const {
+    return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
   }
 
   // Return the fields at their original place in the instruction encoding.
@@ -440,6 +625,11 @@
     return InstructionBits() & kRsFieldMask;
   }
 
+  // Same as above function, but safe to call within InstructionType().
+  inline int RsFieldRawNoAssert() const {
+    return InstructionBits() & kRsFieldMask;
+  }
+
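The NoAssert variant is needed because RsFieldRaw() asserts on
InstructionType(), while InstructionType() itself must read the raw rs field to
classify COP1 instructions (see the switch above); calling the asserting
accessor from there would recurse.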
   inline int RtFieldRaw() const {
     ASSERT(InstructionType() == kRegisterType ||
            InstructionType() == kImmediateType);
@@ -461,37 +651,37 @@
   }
 
   // Get the secondary field according to the opcode.
-  inline int SecondaryField() const {
+  inline int SecondaryValue() const {
     Opcode op = OpcodeFieldRaw();
     switch (op) {
       case SPECIAL:
       case SPECIAL2:
-        return FunctionField();
+        return FunctionValue();
       case COP1:
-        return RsField();
+        return RsValue();
       case REGIMM:
-        return RtField();
+        return RtValue();
       default:
         return NULLSF;
     }
   }
 
-  inline int32_t Imm16Field() const {
+  inline int32_t Imm16Value() const {
     ASSERT(InstructionType() == kImmediateType);
     return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
   }
 
-  inline int32_t Imm26Field() const {
+  inline int32_t Imm26Value() const {
     ASSERT(InstructionType() == kJumpType);
     return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
   }
 
   // Say if the instruction should not be used in a branch delay slot.
-  bool IsForbiddenInBranchDelay();
+  bool IsForbiddenInBranchDelay() const;
   // Say if the instruction 'links'. eg: jal, bal.
-  bool IsLinkingInstruction();
+  bool IsLinkingInstruction() const;
   // Say if the instruction is a break or a trap.
-  bool IsTrap();
+  bool IsTrap() const;
 
   // Instructions are read out of a code stream. The only way to get a
   // reference to an instruction is to convert a pointer. There is no way
@@ -510,16 +700,24 @@
 // -----------------------------------------------------------------------------
 // MIPS assembly various constants.
 
-static const int kArgsSlotsSize  = 4 * Instruction::kInstructionSize;
+
+static const int kArgsSlotsSize  = 4 * Instruction::kInstrSize;
 static const int kArgsSlotsNum   = 4;
+// C/C++ argument slots size.
+static const int kCArgsSlotsSize = 4 * Instruction::kInstrSize;
+// JS argument slots size.
+static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
+// Assembly builtins argument slots size.
+static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
 
-static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
+static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
 
-static const int kDoubleAlignment = 2 * 8;
-static const int kDoubleAlignmentMask = kDoubleAlignmentMask - 1;
+static const int kDoubleAlignmentBits = 3;
+static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
+static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
 
 
-} }   // namespace assembler::mips
+} }   // namespace v8::internal
 
 #endif    // #ifndef V8_MIPS_CONSTANTS_H_
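A minimal sketch of how the rebuilt double-alignment constants are meant to be
used, rounding an address up to the next 8-byte boundary:

    uintptr_t aligned =
        (addr + kDoubleAlignmentMask) & ~static_cast<uintptr_t>(kDoubleAlignmentMask);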
 
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 659fc01..36f577b 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -39,16 +39,25 @@
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "cpu.h"
+#include "macro-assembler.h"
+
+#include "simulator.h"  // For cache flushing.
 
 namespace v8 {
 namespace internal {
 
+
 void CPU::Setup() {
-  // Nothing to do.
+  CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
+  cpu_features->Probe(true);
+  if (!cpu_features->IsSupported(FPU) || Serializer::enabled()) {
+    V8::DisableCrankshaft();
+  }
 }
 
+
 void CPU::FlushICache(void* start, size_t size) {
-#ifdef __mips
+#if !defined (USE_SIMULATOR)
   int res;
 
   // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
@@ -58,7 +67,14 @@
     V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
   }
 
-#endif    // #ifdef __mips
+#else  // USE_SIMULATOR.
+  // Not generating mips instructions for C-code. This means that we are
+  // building a mips emulator-based target.  We should notify the simulator
+  // that the Icache was flushed.
+  // None of this code ends up in the snapshot so there are no issues
+  // around whether or not to generate the code when building snapshots.
+  Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+#endif  // USE_SIMULATOR.
 }
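For reference, the native branch above boils down to the MIPS Linux cacheflush
syscall (a sketch assuming <sys/cachectl.h>; BCACHE flushes both the I- and
D-caches):

    #include <sys/cachectl.h>
    int res = cacheflush(reinterpret_cast<char*>(start), size, BCACHE);
    if (res) V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");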
 
 
@@ -68,6 +84,7 @@
 #endif  // #ifdef __mips
 }
 
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index b8ae68e..35df69b 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -38,8 +38,10 @@
 namespace internal {
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
+
 bool BreakLocationIterator::IsDebugBreakAtReturn() {
-  return Debug::IsDebugBreakAtReturn(rinfo());
+  UNIMPLEMENTED_MIPS();
+  return false;
 }
 
 
@@ -54,18 +56,33 @@
 }
 
 
-// A debug break in the exit code is identified by a call.
+// A debug break in the exit code is identified by the JS frame exit code
+// having been patched with a li/call pseudo-instruction sequence (lui/ori/jalr).
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
-  return rinfo->IsPatchedReturnSequence();
+  UNIMPLEMENTED_MIPS();
+  return false;
+}
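The li/call sequence that comment refers to, once a return site has been
patched (a sketch of the three-instruction pattern; t9 is the conventional MIPS
call register):

    lui   t9, (debug_break >> 16)         # Upper half of the handler address.
    ori   t9, t9, (debug_break & 0xffff)  # Lower half.
    jalr  t9                              # Call through t9.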
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+  UNIMPLEMENTED_MIPS();
+  return false;
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+  UNIMPLEMENTED_MIPS();
 }
 
 
 #define __ ACCESS_MASM(masm)
 
 
-
-
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
 }
@@ -106,12 +123,23 @@
 }
 
 
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
-  masm->Abort("LiveEdit frame dropping is not supported on mips");
+void Debug::GenerateSlot(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
 }
 
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  masm->Abort("LiveEdit frame dropping is not supported on mips");
+  UNIMPLEMENTED_MIPS();
 }
 
 
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
new file mode 100644
index 0000000..4b69859
--- /dev/null
+++ b/src/mips/deoptimizer-mips.cc
@@ -0,0 +1,91 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::table_entry_size_ = 10;
+
+
+int Deoptimizer::patch_size() {
+  const int kCallInstructionSizeInWords = 3;
+  return kCallInstructionSizeInWords * Assembler::kInstrSize;
+}
+
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+                                        Code* check_code,
+                                        Code* replacement_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+                                         Code* check_code,
+                                         Code* replacement_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeOsrOutputFrame() {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::EntryGenerator::Generate() {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  UNIMPLEMENTED();
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index 959a4a2..b7ceb2b 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -34,10 +34,9 @@
 //   NameConverter converter;
 //   Disassembler d(converter);
 //   for (byte_* pc = begin; pc < end;) {
-//     char buffer[128];
-//     buffer[0] = '\0';
-//     byte_* prev_pc = pc;
-//     pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+//     v8::internal::EmbeddedVector<char, 256> buffer;
+//     byte_* prev_pc = pc;
+//     pc += d.InstructionDecode(buffer, pc);
 //     printf("%p    %08x      %s\n",
 //            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
 //   }
@@ -59,17 +58,13 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "constants-mips.h"
+#include "mips/constants-mips.h"
 #include "disasm.h"
 #include "macro-assembler.h"
 #include "platform.h"
 
-namespace assembler {
-namespace mips {
-
-
-namespace v8i = v8::internal;
-
+namespace v8 {
+namespace internal {
 
 //------------------------------------------------------------------------------
 
@@ -99,7 +94,7 @@
 
   // Printing of common values.
   void PrintRegister(int reg);
-  void PrintCRegister(int creg);
+  void PrintFPURegister(int freg);
   void PrintRs(Instruction* instr);
   void PrintRt(Instruction* instr);
   void PrintRd(Instruction* instr);
@@ -107,6 +102,9 @@
   void PrintFt(Instruction* instr);
   void PrintFd(Instruction* instr);
   void PrintSa(Instruction* instr);
+  void PrintSd(Instruction* instr);
+  void PrintBc(Instruction* instr);
+  void PrintCc(Instruction* instr);
   void PrintFunction(Instruction* instr);
   void PrintSecondaryField(Instruction* instr);
   void PrintUImm16(Instruction* instr);
@@ -119,7 +117,7 @@
 
   // Handle formatting of instructions and their options.
   int FormatRegister(Instruction* instr, const char* option);
-  int FormatCRegister(Instruction* instr, const char* option);
+  int FormatFPURegister(Instruction* instr, const char* option);
   int FormatOption(Instruction* instr, const char* option);
   void Format(Instruction* instr, const char* format);
   void Unknown(Instruction* instr);
@@ -166,84 +164,100 @@
 
 
 void Decoder::PrintRs(Instruction* instr) {
-  int reg = instr->RsField();
+  int reg = instr->RsValue();
   PrintRegister(reg);
 }
 
 
 void Decoder::PrintRt(Instruction* instr) {
-  int reg = instr->RtField();
+  int reg = instr->RtValue();
   PrintRegister(reg);
 }
 
 
 void Decoder::PrintRd(Instruction* instr) {
-  int reg = instr->RdField();
+  int reg = instr->RdValue();
   PrintRegister(reg);
 }
 
 
-// Print the Cregister name according to the active name converter.
-void Decoder::PrintCRegister(int creg) {
-  Print(converter_.NameOfXMMRegister(creg));
+// Print the FPUregister name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+  Print(converter_.NameOfXMMRegister(freg));
 }
 
 
 void Decoder::PrintFs(Instruction* instr) {
-  int creg = instr->RsField();
-  PrintCRegister(creg);
+  int freg = instr->RsValue();
+  PrintFPURegister(freg);
 }
 
 
 void Decoder::PrintFt(Instruction* instr) {
-  int creg = instr->RtField();
-  PrintCRegister(creg);
+  int freg = instr->RtValue();
+  PrintFPURegister(freg);
 }
 
 
 void Decoder::PrintFd(Instruction* instr) {
-  int creg = instr->RdField();
-  PrintCRegister(creg);
+  int freg = instr->RdValue();
+  PrintFPURegister(freg);
 }
 
 
 // Print the integer value of the sa field.
 void Decoder::PrintSa(Instruction* instr) {
-  int sa = instr->SaField();
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                       "%d", sa);
+  int sa = instr->SaValue();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
+// Print the integer value of the rd field (when it is not used as a register).
+void Decoder::PrintSd(Instruction* instr) {
+  int sd = instr->RdValue();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
+}
+
+
+// Print the integer value of the cc field for the bc1t/f instructions.
+void Decoder::PrintBc(Instruction* instr) {
+  int cc = instr->FBccValue();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
+}
+
+
+// Print the integer value of the cc field for the FP compare instructions.
+void Decoder::PrintCc(Instruction* instr) {
+  int cc = instr->FCccValue();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
 }
 
 
 // Print 16-bit unsigned immediate value.
 void Decoder::PrintUImm16(Instruction* instr) {
-  int32_t imm = instr->Imm16Field();
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                       "%u", imm);
+  int32_t imm = instr->Imm16Value();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
 }
 
 
 // Print 16-bit signed immediate value.
 void Decoder::PrintSImm16(Instruction* instr) {
-  int32_t imm = ((instr->Imm16Field())<<16)>>16;
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                       "%d", imm);
+  int32_t imm = ((instr->Imm16Value())<<16)>>16;
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
 }
 
 
 // Print 16-bit hexadecimal immediate value.
 void Decoder::PrintXImm16(Instruction* instr) {
-  int32_t imm = instr->Imm16Field();
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                       "0x%x", imm);
+  int32_t imm = instr->Imm16Value();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
 }
 
 
 // Print 26-bit immediate value.
 void Decoder::PrintImm26(Instruction* instr) {
-  int32_t imm = instr->Imm26Field();
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                       "%d", imm);
+  int32_t imm = instr->Imm26Value();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
 }
 
 
@@ -254,8 +268,8 @@
   switch (instr->FunctionFieldRaw()) {
     case BREAK: {
       int32_t code = instr->Bits(25, 6);
-      out_buffer_pos_ +=
-          v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "0x%05x (%d)", code, code);
       break;
     }
     case TGE:
@@ -266,7 +280,7 @@
     case TNE: {
       int32_t code = instr->Bits(15, 6);
       out_buffer_pos_ +=
-          v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+          OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
       break;
     }
     default:  // Not a break or trap instruction.
@@ -285,15 +299,15 @@
 int Decoder::FormatRegister(Instruction* instr, const char* format) {
   ASSERT(format[0] == 'r');
   if (format[1] == 's') {  // 'rs: Rs register
-    int reg = instr->RsField();
+    int reg = instr->RsValue();
     PrintRegister(reg);
     return 2;
   } else if (format[1] == 't') {  // 'rt: rt register
-    int reg = instr->RtField();
+    int reg = instr->RtValue();
     PrintRegister(reg);
     return 2;
   } else if (format[1] == 'd') {  // 'rd: rd register
-    int reg = instr->RdField();
+    int reg = instr->RdValue();
     PrintRegister(reg);
     return 2;
   }
@@ -302,21 +316,21 @@
 }
 
 
-// Handle all Cregister based formatting in this function to reduce the
+// Handle all FPUregister based formatting in this function to reduce the
 // complexity of FormatOption.
-int Decoder::FormatCRegister(Instruction* instr, const char* format) {
+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
   ASSERT(format[0] == 'f');
   if (format[1] == 's') {  // 'fs: fs register
-    int reg = instr->RsField();
-    PrintCRegister(reg);
+    int reg = instr->FsValue();
+    PrintFPURegister(reg);
     return 2;
   } else if (format[1] == 't') {  // 'ft: ft register
-    int reg = instr->RtField();
-    PrintCRegister(reg);
+    int reg = instr->FtValue();
+    PrintFPURegister(reg);
     return 2;
   } else if (format[1] == 'd') {  // 'fd: fd register
-    int reg = instr->RdField();
-    PrintCRegister(reg);
+    int reg = instr->FdValue();
+    PrintFPURegister(reg);
     return 2;
   }
   UNREACHABLE();
@@ -359,12 +373,31 @@
     case 'r': {   // 'r: registers
       return FormatRegister(instr, format);
     }
-    case 'f': {   // 'f: Cregisters
-      return FormatCRegister(instr, format);
+    case 'f': {   // 'f: FPUregisters
+      return FormatFPURegister(instr, format);
     }
     case 's': {   // 'sa
-      ASSERT(STRING_STARTS_WITH(format, "sa"));
-      PrintSa(instr);
+      switch (format[1]) {
+        case 'a': {
+          ASSERT(STRING_STARTS_WITH(format, "sa"));
+          PrintSa(instr);
+          return 2;
+        }
+        case 'd': {
+          ASSERT(STRING_STARTS_WITH(format, "sd"));
+          PrintSd(instr);
+          return 2;
+        }
+      }
+    }
+    case 'b': {   // 'bc - Special for bc1 cc field.
+      ASSERT(STRING_STARTS_WITH(format, "bc"));
+      PrintBc(instr);
+      return 2;
+    }
+    case 'C': {   // 'Cc - Special for c.xx.d cc field.
+      ASSERT(STRING_STARTS_WITH(format, "Cc"));
+      PrintCc(instr);
       return 2;
     }
   };
@@ -401,45 +434,160 @@
   switch (instr->OpcodeFieldRaw()) {
     case COP1:    // Coprocessor instructions
       switch (instr->RsFieldRaw()) {
-        case BC1:   // branch on coprocessor condition
+        case BC1:   // bc1 handled in DecodeTypeImmediate.
           UNREACHABLE();
           break;
         case MFC1:
-          Format(instr, "mfc1 'rt, 'fs");
+          Format(instr, "mfc1   'rt, 'fs");
           break;
         case MFHC1:
-          Format(instr, "mfhc1  rt, 'fs");
+          Format(instr, "mfhc1  'rt, 'fs");
           break;
         case MTC1:
-          Format(instr, "mtc1 'rt, 'fs");
+          Format(instr, "mtc1   'rt, 'fs");
+          break;
+        // These are called "fs" too, although they are not FPU registers.
+        case CTC1:
+          Format(instr, "ctc1   'rt, 'fs");
+          break;
+        case CFC1:
+          Format(instr, "cfc1   'rt, 'fs");
           break;
         case MTHC1:
-          Format(instr, "mthc1  rt, 'fs");
+          Format(instr, "mthc1  'rt, 'fs");
+          break;
+        case D:
+          switch (instr->FunctionFieldRaw()) {
+            case ADD_D:
+              Format(instr, "add.d   'fd, 'fs, 'ft");
+              break;
+            case SUB_D:
+              Format(instr, "sub.d   'fd, 'fs, 'ft");
+              break;
+            case MUL_D:
+              Format(instr, "mul.d   'fd, 'fs, 'ft");
+              break;
+            case DIV_D:
+              Format(instr, "div.d   'fd, 'fs, 'ft");
+              break;
+            case ABS_D:
+              Format(instr, "abs.d   'fd, 'fs");
+              break;
+            case MOV_D:
+              Format(instr, "mov.d   'fd, 'fs");
+              break;
+            case NEG_D:
+              Format(instr, "neg.d   'fd, 'fs");
+              break;
+            case SQRT_D:
+              Format(instr, "sqrt.d   'fd, 'fs");
+              break;
+            case CVT_W_D:
+              Format(instr, "cvt.w.d 'fd, 'fs");
+              break;
+            case CVT_L_D: {
+              if (mips32r2) {
+                Format(instr, "cvt.l.d 'fd, 'fs");
+              } else {
+                Unknown(instr);
+              }
+              break;
+            }
+            case TRUNC_W_D:
+              Format(instr, "trunc.w.d 'fd, 'fs");
+              break;
+            case TRUNC_L_D: {
+              if (mips32r2) {
+                Format(instr, "trunc.l.d 'fd, 'fs");
+              } else {
+                Unknown(instr);
+              }
+              break;
+            }
+            case ROUND_W_D:
+              Format(instr, "round.w.d 'fd, 'fs");
+              break;
+            case FLOOR_W_D:
+              Format(instr, "floor.w.d 'fd, 'fs");
+              break;
+            case CEIL_W_D:
+              Format(instr, "ceil.w.d 'fd, 'fs");
+              break;
+            case CVT_S_D:
+              Format(instr, "cvt.s.d 'fd, 'fs");
+              break;
+            case C_F_D:
+              Format(instr, "c.f.d   'fs, 'ft, 'Cc");
+              break;
+            case C_UN_D:
+              Format(instr, "c.un.d  'fs, 'ft, 'Cc");
+              break;
+            case C_EQ_D:
+              Format(instr, "c.eq.d  'fs, 'ft, 'Cc");
+              break;
+            case C_UEQ_D:
+              Format(instr, "c.ueq.d 'fs, 'ft, 'Cc");
+              break;
+            case C_OLT_D:
+              Format(instr, "c.olt.d 'fs, 'ft, 'Cc");
+              break;
+            case C_ULT_D:
+              Format(instr, "c.ult.d 'fs, 'ft, 'Cc");
+              break;
+            case C_OLE_D:
+              Format(instr, "c.ole.d 'fs, 'ft, 'Cc");
+              break;
+            case C_ULE_D:
+              Format(instr, "c.ule.d 'fs, 'ft, 'Cc");
+              break;
+            default:
+              Format(instr, "unknown.cop1.d");
+              break;
+          }
           break;
         case S:
-        case D:
           UNIMPLEMENTED_MIPS();
           break;
         case W:
           switch (instr->FunctionFieldRaw()) {
-            case CVT_S_W:
-              UNIMPLEMENTED_MIPS();
+            case CVT_S_W:   // Convert word to float (single).
+              Format(instr, "cvt.s.w 'fd, 'fs");
               break;
             case CVT_D_W:   // Convert word to double.
-              Format(instr, "cvt.d.w  'fd, 'fs");
+              Format(instr, "cvt.d.w 'fd, 'fs");
               break;
             default:
               UNREACHABLE();
-          };
+          }
           break;
         case L:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_D_L: {
+              if (mips32r2) {
+                Format(instr, "cvt.d.l 'fd, 'fs");
+              } else {
+                Unknown(instr);
+              }
+              break;
+            }
+            case CVT_S_L: {
+              if (mips32r2) {
+                Format(instr, "cvt.s.l 'fd, 'fs");
+              } else {
+                Unknown(instr);
+              }
+              break;
+            }
+            default:
+              UNREACHABLE();
+          }
+          break;
         case PS:
           UNIMPLEMENTED_MIPS();
           break;
-          break;
         default:
           UNREACHABLE();
-      };
+      }
       break;
     case SPECIAL:
       switch (instr->FunctionFieldRaw()) {
@@ -456,7 +604,15 @@
             Format(instr, "sll  'rd, 'rt, 'sa");
           break;
         case SRL:
-          Format(instr, "srl  'rd, 'rt, 'sa");
+          if (instr->RsValue() == 0) {
+            Format(instr, "srl  'rd, 'rt, 'sa");
+          } else {
+            if (mips32r2) {
+              Format(instr, "rotr  'rd, 'rt, 'sa");
+            } else {
+              Unknown(instr);
+            }
+          }
           break;
         case SRA:
           Format(instr, "sra  'rd, 'rt, 'sa");
@@ -465,7 +621,15 @@
           Format(instr, "sllv 'rd, 'rt, 'rs");
           break;
         case SRLV:
-          Format(instr, "srlv 'rd, 'rt, 'rs");
+          if (instr->SaValue() == 0) {
+            Format(instr, "srlv 'rd, 'rt, 'rs");
+          } else {
+            if (mips32r2) {
+              Format(instr, "rotrv 'rd, 'rt, 'rs");
+            } else {
+              Unknown(instr);
+            }
+          }
           break;
         case SRAV:
           Format(instr, "srav 'rd, 'rt, 'rs");
@@ -504,9 +668,9 @@
           Format(instr, "and  'rd, 'rs, 'rt");
           break;
         case OR:
-          if (0 == instr->RsField()) {
+          if (0 == instr->RsValue()) {
             Format(instr, "mov  'rd, 'rt");
-          } else if (0 == instr->RtField()) {
+          } else if (0 == instr->RtValue()) {
             Format(instr, "mov  'rd, 'rs");
           } else {
             Format(instr, "or   'rd, 'rs, 'rt");
@@ -545,27 +709,79 @@
         case TNE:
           Format(instr, "tne  'rs, 'rt, code: 'code");
           break;
+        case MOVZ:
+          Format(instr, "movz 'rd, 'rs, 'rt");
+          break;
+        case MOVN:
+          Format(instr, "movn 'rd, 'rs, 'rt");
+          break;
+        case MOVCI:
+          if (instr->Bit(16)) {
+            Format(instr, "movt 'rd, 'rs, 'Cc");
+          } else {
+            Format(instr, "movf 'rd, 'rs, 'Cc");
+          }
+          break;
         default:
           UNREACHABLE();
-      };
+      }
       break;
     case SPECIAL2:
       switch (instr->FunctionFieldRaw()) {
         case MUL:
+          Format(instr, "mul  'rd, 'rs, 'rt");
+          break;
+        case CLZ:
+          Format(instr, "clz  'rd, 'rs");
           break;
         default:
           UNREACHABLE();
-      };
+      }
+      break;
+    case SPECIAL3:
+      switch (instr->FunctionFieldRaw()) {
+        case INS: {
+          if (mips32r2) {
+            Format(instr, "ins  'rt, 'rs, 'sd, 'sa");
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case EXT: {
+          if (mips32r2) {
+            Format(instr, "ext  'rt, 'rs, 'sd, 'sa");
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        default:
+          UNREACHABLE();
+      }
       break;
     default:
       UNREACHABLE();
-  };
+  }
 }
 
 
 void Decoder::DecodeTypeImmediate(Instruction* instr) {
   switch (instr->OpcodeFieldRaw()) {
     // ------------- REGIMM class.
+    case COP1:
+      switch (instr->RsFieldRaw()) {
+        case BC1:
+          if (instr->FBtrueValue()) {
+            Format(instr, "bc1t    'bc, 'imm16u");
+          } else {
+            Format(instr, "bc1f    'bc, 'imm16u");
+          }
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;  // Case COP1.
     case REGIMM:
       switch (instr->RtFieldRaw()) {
         case BLTZ:
@@ -582,8 +798,8 @@
           break;
         default:
           UNREACHABLE();
-      };
-    break;  // case REGIMM
+      }
+    break;  // Case REGIMM.
     // ------------- Branch instructions.
     case BEQ:
       Format(instr, "beq  'rs, 'rt, 'imm16u");
@@ -626,18 +842,39 @@
     case LB:
       Format(instr, "lb     'rt, 'imm16s('rs)");
       break;
+    case LH:
+      Format(instr, "lh     'rt, 'imm16s('rs)");
+      break;
+    case LWL:
+      Format(instr, "lwl    'rt, 'imm16s('rs)");
+      break;
     case LW:
       Format(instr, "lw     'rt, 'imm16s('rs)");
       break;
     case LBU:
       Format(instr, "lbu    'rt, 'imm16s('rs)");
       break;
+    case LHU:
+      Format(instr, "lhu    'rt, 'imm16s('rs)");
+      break;
+    case LWR:
+      Format(instr, "lwr    'rt, 'imm16s('rs)");
+      break;
     case SB:
       Format(instr, "sb     'rt, 'imm16s('rs)");
       break;
+    case SH:
+      Format(instr, "sh     'rt, 'imm16s('rs)");
+      break;
+    case SWL:
+      Format(instr, "swl    'rt, 'imm16s('rs)");
+      break;
     case SW:
       Format(instr, "sw     'rt, 'imm16s('rs)");
       break;
+    case SWR:
+      Format(instr, "swr    'rt, 'imm16s('rs)");
+      break;
     case LWC1:
       Format(instr, "lwc1   'ft, 'imm16s('rs)");
       break;
@@ -645,10 +882,10 @@
       Format(instr, "ldc1   'ft, 'imm16s('rs)");
       break;
     case SWC1:
-      Format(instr, "swc1   'rt, 'imm16s('fs)");
+      Format(instr, "swc1   'ft, 'imm16s('rs)");
       break;
     case SDC1:
-      Format(instr, "sdc1   'rt, 'imm16s('fs)");
+      Format(instr, "sdc1   'ft, 'imm16s('rs)");
       break;
     default:
       UNREACHABLE();
@@ -675,7 +912,7 @@
 int Decoder::InstructionDecode(byte_* instr_ptr) {
   Instruction* instr = Instruction::At(instr_ptr);
   // Print raw instruction bytes.
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                   "%08x       ",
                                   instr->InstructionBits());
   switch (instr->InstructionType()) {
@@ -695,11 +932,11 @@
       UNSUPPORTED_MIPS();
     }
   }
-  return Instruction::kInstructionSize;
+  return Instruction::kInstrSize;
 }
 
 
-} }  // namespace assembler::mips
+} }  // namespace v8::internal
 
 
 
@@ -707,13 +944,11 @@
 
 namespace disasm {
 
-namespace v8i = v8::internal;
-
+using v8::internal::byte_;
 
 const char* NameConverter::NameOfAddress(byte_* addr) const {
-  static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
-  v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
-  return tmp_buffer.start();
+  v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+  return tmp_buffer_.start();
 }
 
 
@@ -723,12 +958,12 @@
 
 
 const char* NameConverter::NameOfCPURegister(int reg) const {
-  return assembler::mips::Registers::Name(reg);
+  return v8::internal::Registers::Name(reg);
 }
 
 
 const char* NameConverter::NameOfXMMRegister(int reg) const {
-  return assembler::mips::FPURegister::Name(reg);
+  return v8::internal::FPURegisters::Name(reg);
 }
 
 
@@ -756,13 +991,13 @@
 
 int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
                                     byte_* instruction) {
-  assembler::mips::Decoder d(converter_, buffer);
+  v8::internal::Decoder d(converter_, buffer);
   return d.InstructionDecode(instruction);
 }
 
 
+// The MIPS assembler does not currently use constant pools.
 int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
-  UNIMPLEMENTED_MIPS();
   return -1;
 }
 
@@ -780,6 +1015,7 @@
   }
 }
 
+
 #undef UNSUPPORTED
 
 }  // namespace disasm
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index d630562..e2e0c91 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,57 +37,9 @@
 namespace internal {
 
 
-StackFrame::Type StackFrame::ComputeType(State* state) {
-  ASSERT(state->fp != NULL);
-  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
-    return ARGUMENTS_ADAPTOR;
-  }
-  // The marker and function offsets overlap. If the marker isn't a
-  // smi then the frame is a JavaScript frame -- and the marker is
-  // really the function.
-  const int offset = StandardFrameConstants::kMarkerOffset;
-  Object* marker = Memory::Object_at(state->fp + offset);
-  if (!marker->IsSmi()) return JAVA_SCRIPT;
-  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
 Address ExitFrame::ComputeStackPointer(Address fp) {
-  Address sp = fp + ExitFrameConstants::kSPDisplacement;
-  const int offset = ExitFrameConstants::kCodeOffset;
-  Object* code = Memory::Object_at(fp + offset);
-  bool is_debug_exit = code->IsSmi();
-  if (is_debug_exit) {
-    sp -= kNumJSCallerSaved * kPointerSize;
-  }
-  return sp;
-}
-
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
-  // Do nothing
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
-  return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
   UNIMPLEMENTED_MIPS();
-  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
-  UNIMPLEMENTED_MIPS();
-  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
-  return fp() + StandardFrameConstants::kCallerSPOffset;
+  return fp;
 }
 
 
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 06e9979..6441470 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,16 +40,17 @@
 static const int kNumRegs = 32;
 
 static const RegList kJSCallerSaved =
+  1 << 2 |  // v0
   1 << 4 |  // a0
   1 << 5 |  // a1
   1 << 6 |  // a2
   1 << 7;   // a3
 
-static const int kNumJSCallerSaved = 4;
+static const int kNumJSCallerSaved = 5;
 
 
 // Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns r0.code() == 0.
+// e.g. JSCallerSavedCode(0) returns v0.code() == 2.
 int JSCallerSavedCode(int n);
 
 
@@ -64,6 +65,18 @@
 static const int kNumCalleeSaved = 11;
 
 
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(mips): Only 8 registers may actually be sufficient. Revisit.
+static const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+static const int kNumSafepointSavedRegisters =
+    kNumJSCallerSaved + kNumCalleeSaved;
+
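The counts work out exactly here: 5 caller-saved plus 11 callee-saved registers
is 16, so the saved set currently fills every reserved slot. The invariant
stated above could be checked cheaply (a sketch using v8's STATIC_ASSERT from
checks.h):

    STATIC_ASSERT(kNumSafepointSavedRegisters <= kNumSafepointRegisters);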
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 
@@ -88,15 +101,14 @@
 
 class ExitFrameConstants : public AllStatic {
  public:
-  // Exit frames have a debug marker on the stack.
-  static const int kSPDisplacement = -1 * kPointerSize;
-
-  // The debug marker is just above the frame pointer.
   static const int kDebugMarkOffset = -1 * kPointerSize;
   // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
   static const int kCodeOffset = -1 * kPointerSize;
+  static const int kSPOffset = -1 * kPointerSize;
 
-  static const int kSavedRegistersOffset = 0 * kPointerSize;
+  // TODO(mips): Use a patched sp value on the stack instead.
+  // A marker of 0 indicates that double registers are saved.
+  static const int kMarkerOffset = -2 * kPointerSize;
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = +0 * kPointerSize;
@@ -126,6 +138,8 @@
   static const int kCArgsSlotsSize = 4 * kPointerSize;
   // JS argument slots size.
   static const int kJSArgsSlotsSize = 0 * kPointerSize;
+  // Assembly builtins argument slots size.
+  static const int kBArgsSlotsSize = 0 * kPointerSize;
 };
 
 
@@ -159,6 +173,7 @@
   return Memory::Object_at(fp() + offset);
 }
 
+
 } }  // namespace v8::internal
 
 #endif
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 17ee531..87507ff 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,18 +29,55 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
+// Note on Mips implementation:
+//
+// The result_register() for mips is the 'v0' register, which is defined
+// by the ABI to contain function return values. However, the first
+// parameter to a function is defined to be 'a0'. So there are many
+// places where we have to move a previous result in v0 to a0 for the
+// next call: mov(a0, v0). This is not needed on the other architectures.
+
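A sketch of the pattern that note describes (the stub is hypothetical; the mov
is the only MIPS-specific step):

    __ CallStub(&stub);  // Result comes back in v0, the ABI return register.
    __ mov(a0, v0);      // The next call expects its first argument in a0.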
+#include "code-stubs.h"
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
 #include "parser.h"
+#include "scopes.h"
+#include "stub-cache.h"
+
+#include "mips/code-stubs-mips.h"
 
 namespace v8 {
 namespace internal {
 
 #define __ ACCESS_MASM(masm_)
 
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right.  The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+//   o a1: the JS function object being called (ie, ourselves)
+//   o cp: our context
+//   o fp: our caller's frame pointer
+//   o sp: stack pointer
+//   o ra: return address
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-mips.h for its layout.
+void FullCodeGenerator::Generate(CompilationInfo* info) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -50,47 +87,165 @@
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
-  UNIMPLEMENTED_MIPS();
-}
-
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void FullCodeGenerator::DropAndApply(int count,
-                                     Expression::Context context,
-                                     Register reg) {
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context,
-                              Label* materialize_true,
-                              Label* materialize_false) {
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void FullCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Heap::RootListIndex index) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Heap::RootListIndex index) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Handle<Object> lit) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+                                                   Register reg) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+    int count,
+    Register reg) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+                                                       Register reg) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+                                                 Register reg) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+                                            Label* materialize_false) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+                                          Label* materialize_false) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DoTest(Label* if_true,
+                               Label* if_false,
+                               Label* fall_through) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Original prototype for mips, needs arch-indep change. Leave out for now.
+// void FullCodeGenerator::Split(Condition cc,
+//                               Register lhs,
+//                               const Operand&  rhs,
+//                               Label* if_true,
+//                               Label* if_false,
+//                               Label* fall_through) {
+void FullCodeGenerator::Split(Condition cc,
+                              Label* if_true,
+                              Label* if_false,
+                              Label* fall_through) {
   UNIMPLEMENTED_MIPS();
 }
 
 
 MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
   UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);   // UNIMPLEMENTED RETURN
+  return MemOperand(zero_reg, 0);
 }
 
 
@@ -99,6 +254,14 @@
 }
 
 
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void FullCodeGenerator::Move(Slot* dst,
                              Register src,
                              Register scratch1,
@@ -107,6 +270,13 @@
 }
 
 
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
   UNIMPLEMENTED_MIPS();
 }
@@ -117,7 +287,18 @@
 }
 
 
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+                                       bool pretenure) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -127,8 +308,32 @@
 }
 
 
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
-                                         Expression::Context context) {
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
+    Slot* slot,
+    Label* slow) {
+  UNIMPLEMENTED_MIPS();
+  return MemOperand(zero_reg, 0);
+}
+
+
+void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
+    Slot* slot,
+    TypeofState typeof_state,
+    Label* slow,
+    Label* done) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
+    Slot* slot,
+    TypeofState typeof_state,
+    Label* slow) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -163,14 +368,28 @@
 }
 
 
+void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+                                              Token::Value op,
+                                              OverwriteMode mode,
+                                              Expression* left,
+                                              Expression* right) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
-                                     Expression::Context context) {
+                                     OverwriteMode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
   UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
-                                               Expression::Context context) {
+                                               Token::Value op) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -189,13 +408,21 @@
   UNIMPLEMENTED_MIPS();
 }
 
+
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
-                                       Handle<Object> ignored,
+                                       Handle<Object> name,
                                        RelocInfo::Mode mode) {
   UNIMPLEMENTED_MIPS();
 }
 
 
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+                                            Expression* key,
+                                            RelocInfo::Mode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void FullCodeGenerator::EmitCallWithStub(Call* expr) {
   UNIMPLEMENTED_MIPS();
 }
@@ -211,6 +438,202 @@
 }
 
 
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+    ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   UNIMPLEMENTED_MIPS();
 }
@@ -226,25 +649,52 @@
 }
 
 
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   UNIMPLEMENTED_MIPS();
 }
 
 
+bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
+                                          Expression* left,
+                                          Expression* right,
+                                          Label* if_true,
+                                          Label* if_false,
+                                          Label* fall_through) {
+  UNIMPLEMENTED_MIPS();
+  return false;
+}
+
+
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   UNIMPLEMENTED_MIPS();
 }
 
 
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-Register FullCodeGenerator::result_register() { return v0; }
+Register FullCodeGenerator::result_register() {
+  UNIMPLEMENTED_MIPS();
+  return v0;
+}
 
 
-Register FullCodeGenerator::context_register() { return cp; }
+Register FullCodeGenerator::context_register() {
+  UNIMPLEMENTED_MIPS();
+  return cp;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+  UNIMPLEMENTED_MIPS();
+}
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index e5c2ad8..fa8a7bb 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -32,6 +32,7 @@
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "codegen-inl.h"
+#include "code-stubs.h"
 #include "ic-inl.h"
 #include "runtime.h"
 #include "stub-cache.h"
@@ -52,7 +53,7 @@
 }
 
 
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -65,6 +66,12 @@
 // Defined in ic.cc.
 Object* CallIC_Miss(Arguments args);
 
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   UNIMPLEMENTED_MIPS();
 }
@@ -74,51 +81,22 @@
   UNIMPLEMENTED_MIPS();
 }
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
   UNIMPLEMENTED_MIPS();
-    // Registers:
-    // a2: name
-    // ra: return address
-
-  // Get the receiver of the function from the stack.
-  __ lw(a3, MemOperand(sp, argc*kPointerSize));
-
-  __ EnterInternalFrame();
-
-  // Push the receiver and the name of the function.
-  __ MultiPush(a2.bit() | a3.bit());
-
-  // Call the entry.
-  __ li(a0, Operand(2));
-  __ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
-
-  CEntryStub stub(1);
-  __ CallStub(&stub);
-
-  // Move result to r1 and leave the internal frame.
-  __ mov(a1, v0);
-  __ LeaveInternalFrame();
-
-  // Check if the receiver is a global object of some sort.
-  Label invoke, global;
-  __ lw(a2, MemOperand(sp, argc * kPointerSize));
-  __ andi(t0, a2, kSmiTagMask);
-  __ Branch(eq, &invoke, t0, Operand(zero_reg));
-  __ GetObjectType(a2, a3, a3);
-  __ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
-
-  // Patch the receiver on the stack.
-  __ bind(&global);
-  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
-  __ sw(a2, MemOperand(sp, argc * kPointerSize));
-
-  // Invoke the function.
-  ParameterCount actual(argc);
-  __ bind(&invoke);
-  __ InvokeFunction(a1, actual, JUMP_FUNCTION);
 }
 
+
+void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 // Defined in ic.cc.
 Object* LoadIC_Miss(Arguments args);
 
@@ -137,19 +115,35 @@
 }
 
 
-void LoadIC::ClearInlinedVersion(Address address) {}
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  UNIMPLEMENTED_MIPS();
   return false;
 }
 
-void KeyedLoadIC::ClearInlinedVersion(Address address) {}
+
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell,
+                                        bool is_dont_delete) {
+  UNIMPLEMENTED_MIPS();
+  return false;
+}
+
+
+bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+  UNIMPLEMENTED_MIPS();
+  return false;
+}
+
+
 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  UNIMPLEMENTED_MIPS();
   return false;
 }
 
-void KeyedStoreIC::ClearInlinedVersion(Address address) {}
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+
 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  UNIMPLEMENTED_MIPS();
   return false;
 }
 
@@ -162,6 +156,11 @@
 }
 
 
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
 }
@@ -172,7 +171,14 @@
 }
 
 
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                              StrictModeFlag strict_mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictModeFlag strict_mode) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -187,7 +193,8 @@
 }
 
 
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+                                  StrictModeFlag strict_mode) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -201,8 +208,37 @@
   UNIMPLEMENTED_MIPS();
 }
 
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+                                  StrictModeFlag strict_mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 #undef __
 
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  UNIMPLEMENTED_MIPS();
+  return kNoCondition;
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void PatchInlinedSmiCode(Address address) {
+  // Currently there is no smi inlining in the MIPS full code generator.
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
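
CompareIC::ComputeCondition above is stubbed out. For orientation, the
token-to-condition mapping it is expected to implement mirrors the other
ports; a hedged sketch, not the committed MIPS code:

    Condition CompareIC::ComputeCondition(Token::Value op) {
      switch (op) {
        case Token::EQ:
        case Token::EQ_STRICT:
          return eq;
        case Token::LT:
          return less;
        case Token::GT:
          return greater;
        case Token::LTE:
          return less_equal;
        case Token::GTE:
          return greater_equal;
        default:
          UNREACHABLE();
          return kNoCondition;
      }
    }
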
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
index 408f75e..bd6d60b 100644
--- a/src/mips/jump-target-mips.cc
+++ b/src/mips/jump-target-mips.cc
@@ -43,41 +43,19 @@
 
 #define __ ACCESS_MASM(cgen()->masm())
 
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
+    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
+    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+
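
The macro encodes a calling convention rather than anything MIPS-specific: an
unconditional jump must be written with the dummy operands zero_reg and
Operand(zero_reg), while a real condition must supply at least one non-dummy
operand. Illustrative call sites (hypothetical, shown only to pin down the
contract):

    BRANCH_ARGS_CHECK(cc_always, zero_reg, Operand(zero_reg));  // Passes.
    BRANCH_ARGS_CHECK(eq, a0, Operand(zero_reg));               // Passes.
    BRANCH_ARGS_CHECK(cc_always, a0, Operand(zero_reg));        // Asserts.
    BRANCH_ARGS_CHECK(eq, zero_reg, Operand(zero_reg));         // Asserts.
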
+
 void JumpTarget::DoJump() {
-  ASSERT(cgen()->has_valid_frame());
-  // Live non-frame registers are not allowed at unconditional jumps
-  // because we have no way of invalidating the corresponding results
-  // which are still live in the C++ code.
-  ASSERT(cgen()->HasValidEntryRegisters());
-
-  if (is_bound()) {
-    // Backward jump.  There already a frame expectation at the target.
-    ASSERT(direction_ == BIDIRECTIONAL);
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-  } else {
-    // Use the current frame as the expected one at the target if necessary.
-    if (entry_frame_ == NULL) {
-      entry_frame_ = cgen()->frame();
-      RegisterFile empty;
-      cgen()->SetFrame(NULL, &empty);
-    } else {
-      cgen()->frame()->MergeTo(entry_frame_);
-      cgen()->DeleteFrame();
-    }
-
-    // The predicate is_linked() should be made true.  Its implementation
-    // detects the presence of a frame pointer in the reaching_frames_ list.
-    if (!is_linked()) {
-      reaching_frames_.Add(NULL);
-      ASSERT(is_linked());
-    }
-  }
-  __ b(&entry_label_);
-  __ nop();   // Branch delay slot nop.
+  UNIMPLEMENTED_MIPS();
 }
 
-
+// Original prototype for mips; needs arch-indep change. Leave out for now.
+// void JumpTarget::DoBranch(Condition cc, Hint ignored,
+//     Register src1, const Operand& src2) {
 void JumpTarget::DoBranch(Condition cc, Hint ignored) {
   UNIMPLEMENTED_MIPS();
 }
@@ -89,85 +67,12 @@
 
 
 void JumpTarget::DoBind() {
-  ASSERT(!is_bound());
-
-  // Live non-frame registers are not allowed at the start of a basic
-  // block.
-  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
-  if (cgen()->has_valid_frame()) {
-    // If there is a current frame we can use it on the fall through.
-    if (entry_frame_ == NULL) {
-      entry_frame_ = new VirtualFrame(cgen()->frame());
-    } else {
-      ASSERT(cgen()->frame()->Equals(entry_frame_));
-    }
-  } else {
-    // If there is no current frame we must have an entry frame which we can
-    // copy.
-    ASSERT(entry_frame_ != NULL);
-    RegisterFile empty;
-    cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-  }
-
-  // The predicate is_linked() should be made false.  Its implementation
-  // detects the presence (or absence) of frame pointers in the
-  // reaching_frames_ list.  If we inserted a bogus frame to make
-  // is_linked() true, remove it now.
-  if (is_linked()) {
-    reaching_frames_.Clear();
-  }
-
-  __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
-  // On ARM we do not currently emit merge code for jumps, so we need to do
-  // it explicitly here.  The only merging necessary is to drop extra
-  // statement state from the stack.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->Drop(count);
-  DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even
-  // on the fall through.  This is so we can bind the return target
-  // with state on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    // On ARM we do not currently emit merge code at binding sites, so we need
-    // to do it explicitly here.  The only merging necessary is to drop extra
-    // statement state from the stack.
-    cgen()->frame()->Drop(count);
-  }
-
-  DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
   UNIMPLEMENTED_MIPS();
 }
 
 
 #undef __
+#undef BRANCH_ARGS_CHECK
 
 
 } }  // namespace v8::internal
diff --git a/src/mips/fast-codegen-mips.cc b/src/mips/lithium-codegen-mips.h
similarity index 63%
copy from src/mips/fast-codegen-mips.cc
copy to src/mips/lithium-codegen-mips.h
index 186f9fa..345d912 100644
--- a/src/mips/fast-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.h
@@ -25,53 +25,41 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
 
-#if defined(V8_TARGET_ARCH_MIPS)
+#include "mips/lithium-mips.h"
 
-#include "codegen-inl.h"
-#include "fast-codegen.h"
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
+// Forward declarations.
+class LDeferredCode;
 
-Register FastCodeGenerator::accumulator0() { return no_reg; }
-Register FastCodeGenerator::accumulator1() { return no_reg; }
-Register FastCodeGenerator::scratch0() { return no_reg; }
-Register FastCodeGenerator::scratch1() { return no_reg; }
-Register FastCodeGenerator::receiver_reg() { return no_reg; }
-Register FastCodeGenerator::context_reg() { return no_reg; }
+class LCodeGen BASE_EMBEDDED {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
 
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode() {
+    UNIMPLEMENTED();
+    return false;
+  }
 
-void FastCodeGenerator::Generate(CompilationInfo* info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
-  UNIMPLEMENTED_MIPS();
-}
-
-#undef __
-
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+};
 
 } }  // namespace v8::internal
 
-#endif  // V8_TARGET_ARCH_MIPS
+#endif  // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
new file mode 100644
index 0000000..e11dfab
--- /dev/null
+++ b/src/mips/lithium-mips.h
@@ -0,0 +1,304 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_LITHIUM_MIPS_H_
+#define V8_MIPS_LITHIUM_MIPS_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "lithium.h"
+#include "safepoint-table.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+
+class LInstruction: public ZoneObject {
+ public:
+  LInstruction() { }
+  virtual ~LInstruction() { }
+
+  // Predicates should be generated by a macro, as in lithium-ia32.h.
+  virtual bool IsLabel() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+  virtual bool IsOsrEntry() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  LPointerMap* pointer_map() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool HasPointerMap() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
+
+  LEnvironment* environment() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool HasEnvironment() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+
+  virtual bool IsControl() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  void MarkAsCall() { UNIMPLEMENTED(); }
+  void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  bool IsMarkedAsSaveDoubles() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  virtual bool HasResult() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  virtual LOperand* result() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  virtual int InputCount() {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  virtual LOperand* InputAt(int i) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  virtual int TempCount() {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  virtual LOperand* TempAt(int i) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand* FirstInput() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand* Output() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+#ifdef DEBUG
+  void VerifyCall() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LGap: public LInstruction {
+ public:
+  explicit LGap(HBasicBlock* block) { }
+
+  HBasicBlock* block() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos)  {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+};
+
+
+class LLabel: public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block) : LGap(block) { }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+  // Function could be generated by a macro as in lithium-ia32.h.
+  static LOsrEntry* cast(LInstruction* instr) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand** SpilledRegisterArray() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+  LOperand** SpilledDoubleRegisterArray() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
+    UNIMPLEMENTED();
+  }
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand) {
+    UNIMPLEMENTED();
+  }
+};
+
+
+class LChunk: public ZoneObject {
+ public:
+  explicit LChunk(CompilationInfo* info, HGraph* graph) { }
+
+  HGraph* graph() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  CompilationInfo* info() const { return NULL; }
+
+  const ZoneList<LPointerMap*>* pointer_maps() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand* GetNextSpillSlot(bool double_slot) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LConstantOperand* DefineConstantOperand(HConstant* constant) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LLabel* GetLabel(int block_id) const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  const ZoneList<LInstruction*>* instructions() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  int GetParameterStackSlot(int index) const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
+
+  LGap* GetGapAt(int index) const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool IsGapAt(int index) const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  int NearestGapPos(int index) const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+
+#ifdef DEBUG
+  void Verify() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) { }
+
+  // Build the sequence for the graph.
+  LChunk* Build() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
+    UNIMPLEMENTED(); \
+    return NULL; \
+  }
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_LITHIUM_MIPS_H_
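
HYDROGEN_CONCRETE_INSTRUCTION_LIST applies DECLARE_DO once per concrete
hydrogen instruction, so every Do* method in LChunkBuilder is the same stub.
For the Add entry, for example, the expansion is:

    LInstruction* DoAdd(HAdd* node) {
      UNIMPLEMENTED();
      return NULL;
    }
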
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e096028..bd4ab48 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
+#include <limits.h>  // For LONG_MIN, LONG_MAX
 
 #include "v8.h"
 
@@ -41,68 +41,90 @@
 
 MacroAssembler::MacroAssembler(void* buffer, int size)
     : Assembler(buffer, size),
-      unresolved_(0),
       generating_stub_(false),
       allow_stub_calls_(true),
-      code_object_(Heap::undefined_value()) {
+      code_object_(HEAP->undefined_value()) {
 }
 
 
+// Arguments macros
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
 
-void MacroAssembler::Jump(Register target, Condition cond,
-                          Register r1, const Operand& r2) {
-  Jump(Operand(target), cond, r1, r2);
+#define REGISTER_TARGET_BODY(Name) \
+void MacroAssembler::Name(Register target, \
+                          BranchDelaySlot bd) { \
+  Name(Operand(target), bd); \
+} \
+void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
+                          BranchDelaySlot bd) { \
+  Name(Operand(target), COND_ARGS, bd); \
 }
 
 
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
-                          Condition cond, Register r1, const Operand& r2) {
-  Jump(Operand(target, rmode), cond, r1, r2);
+#define INT_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
+                          BranchDelaySlot bd) { \
+  Name(Operand(target, rmode), bd); \
+} \
+void MacroAssembler::Name(intptr_t target, \
+                          RelocInfo::Mode rmode, \
+                          COND_TYPED_ARGS, \
+                          BranchDelaySlot bd) { \
+  Name(Operand(target, rmode), COND_ARGS, bd); \
 }
 
 
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
-                          Condition cond, Register r1, const Operand& r2) {
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
-  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+#define BYTE_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
+                          BranchDelaySlot bd) { \
+  Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
+} \
+void MacroAssembler::Name(byte* target, \
+                          RelocInfo::Mode rmode, \
+                          COND_TYPED_ARGS, \
+                          BranchDelaySlot bd) { \
+  Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
 }
 
 
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
-                          Condition cond, Register r1, const Operand& r2) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
-  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+#define CODE_TARGET_BODY(Name) \
+void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
+                          BranchDelaySlot bd) { \
+  Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
+} \
+void MacroAssembler::Name(Handle<Code> target, \
+                          RelocInfo::Mode rmode, \
+                          COND_TYPED_ARGS, \
+                          BranchDelaySlot bd) { \
+  Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
 }
 
 
-void MacroAssembler::Call(Register target,
-                          Condition cond, Register r1, const Operand& r2) {
-  Call(Operand(target), cond, r1, r2);
+REGISTER_TARGET_BODY(Jump)
+REGISTER_TARGET_BODY(Call)
+INT_PTR_TARGET_BODY(Jump)
+INT_PTR_TARGET_BODY(Call)
+BYTE_PTR_TARGET_BODY(Jump)
+BYTE_PTR_TARGET_BODY(Call)
+CODE_TARGET_BODY(Jump)
+CODE_TARGET_BODY(Call)
+
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef REGISTER_TARGET_BODY
+#undef INT_PTR_TARGET_BODY
+#undef BYTE_PTR_TARGET_BODY
+#undef CODE_TARGET_BODY
+
+
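
Each *_TARGET_BODY macro stamps out one unconditional and one conditional
overload that simply forward to the Operand-based core routine.
REGISTER_TARGET_BODY(Jump), for instance, expands to:

    void MacroAssembler::Jump(Register target, BranchDelaySlot bd) {
      Jump(Operand(target), bd);
    }
    void MacroAssembler::Jump(Register target, Condition cond, Register r1,
                              const Operand& r2, BranchDelaySlot bd) {
      Jump(Operand(target), cond, r1, r2, bd);
    }
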
+void MacroAssembler::Ret(BranchDelaySlot bd) {
+  Jump(Operand(ra), bd);
 }
 
 
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
-                          Condition cond, Register r1, const Operand& r2) {
-  Call(Operand(target, rmode), cond, r1, r2);
-}
-
-
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
-                          Condition cond, Register r1, const Operand& r2) {
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
-  Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
-}
-
-
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
-                          Condition cond, Register r1, const Operand& r2) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
-  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
-}
-
-
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
-  Jump(Operand(ra), cond, r1, r2);
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
+    BranchDelaySlot bd) {
+  Jump(Operand(ra), cond, r1, r2, bd);
 }
 
 
@@ -111,45 +133,226 @@
   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
 }
 
+
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
-  Branch(NegateCondition(cond), 2, src1, src2);
+  Branch(2, NegateCondition(cond), src1, src2);
   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
 }
 
 
-void MacroAssembler::RecordWrite(Register object, Register offset,
+void MacroAssembler::StoreRoot(Register source,
+                               Heap::RootListIndex index) {
+  sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+                               Heap::RootListIndex index,
+                               Condition cond,
+                               Register src1, const Operand& src2) {
+  Branch(2, NegateCondition(cond), src1, src2);
+  sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::RecordWriteHelper(Register object,
+                                       Register address,
+                                       Register scratch) {
+  if (FLAG_debug_code) {
+    // Check that the object is not in new space.
+    Label not_in_new_space;
+    InNewSpace(object, scratch, ne, &not_in_new_space);
+    Abort("new-space object passed to RecordWriteHelper");
+    bind(&not_in_new_space);
+  }
+
+  // Calculate page address: Clear bits from 0 to kPageSizeBits.
+  if (mips32r2) {
+    Ins(object, zero_reg, 0, kPageSizeBits);
+  } else {
+    // The Ins macro is slow on MIPS32R1 cores, so use shifts instead.
+    srl(object, object, kPageSizeBits);
+    sll(object, object, kPageSizeBits);
+  }
+
+  // Calculate region number.
+  Ext(address, address, Page::kRegionSizeLog2,
+      kPageSizeBits - Page::kRegionSizeLog2);
+
+  // Mark region dirty.
+  lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+  li(at, Operand(1));
+  sllv(at, at, address);
+  or_(scratch, scratch, at);
+  sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+}
+
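
The three steps above reduce to straightforward address arithmetic. A
host-side model of the same computation (a sketch: the constants are V8's,
the 32-bit flat-address assumption is mine):

    // Given: object and address as 32-bit addresses (uint32_t).
    uint32_t page = object & ~((1u << kPageSizeBits) - 1u);      // Page start.
    uint32_t region = (address >> Page::kRegionSizeLog2) &
        ((1u << (kPageSizeBits - Page::kRegionSizeLog2)) - 1u);  // Region idx.
    *reinterpret_cast<uint32_t*>(page + Page::kDirtyFlagOffset) |=
        1u << region;                                            // Mark dirty.
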
+
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch) {
+  ASSERT(cc == eq || cc == ne);
+  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+  Branch(branch, cc, scratch,
+         Operand(ExternalReference::new_space_start(isolate())));
+}
+
+
+// Will clobber 4 registers: object, scratch0, scratch1, at. The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+                                 Operand offset,
+                                 Register scratch0,
+                                 Register scratch1) {
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are cp.
+  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
+
+  Label done;
+
+  // First, test that the object is not in the new space.  We cannot set
+  // region marks for new space pages.
+  InNewSpace(object, scratch0, eq, &done);
+
+  // Add offset into the object.
+  Addu(scratch0, object, offset);
+
+  // Record the actual write.
+  RecordWriteHelper(object, scratch0, scratch1);
+
+  bind(&done);
+
+  // Clobber all input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (FLAG_debug_code) {
+    li(object, Operand(BitCast<int32_t>(kZapValue)));
+    li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+    li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+  }
+}
+
+
+// Will clobber 4 registers: object, address, scratch, ip.  The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+                                 Register address,
                                  Register scratch) {
-  UNIMPLEMENTED_MIPS();
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are cp.
+  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+
+  Label done;
+
+  // First, test that the object is not in the new space.  We cannot set
+  // region marks for new space pages.
+  InNewSpace(object, scratch, eq, &done);
+
+  // Record the actual write.
+  RecordWriteHelper(object, address, scratch);
+
+  bind(&done);
+
+  // Clobber all input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (FLAG_debug_code) {
+    li(object, Operand(BitCast<int32_t>(kZapValue)));
+    li(address, Operand(BitCast<int32_t>(kZapValue)));
+    li(scratch, Operand(BitCast<int32_t>(kZapValue)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Allocation support
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch,
+                                            Label* miss) {
+  Label same_contexts;
+
+  ASSERT(!holder_reg.is(scratch));
+  ASSERT(!holder_reg.is(at));
+  ASSERT(!scratch.is(at));
+
+  // Load current lexical context from the stack frame.
+  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+  Check(ne, "we should not have an empty lexical context",
+      scratch, Operand(zero_reg));
+#endif
+
+  // Load the global context of the current context.
+  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  lw(scratch, FieldMemOperand(scratch, offset));
+  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+    Push(holder_reg);  // Temporarily save holder on the stack.
+    // Read the first word and compare to the global_context_map.
+    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+    Check(eq, "JSGlobalObject::global_context should be a global context.",
+          holder_reg, Operand(at));
+    Pop(holder_reg);  // Restore holder.
+  }
+
+  // Check if both contexts are the same.
+  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  Branch(&same_contexts, eq, scratch, Operand(at));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+    Push(holder_reg);  // Temporarily save holder on the stack.
+    mov(holder_reg, at);  // Move at to its holding place.
+    LoadRoot(at, Heap::kNullValueRootIndex);
+    Check(ne, "JSGlobalProxy::context() should not be null.",
+          holder_reg, Operand(at));
+
+    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+    Check(eq, "JSGlobalObject::global_context should be a global context.",
+          holder_reg, Operand(at));
+    // Restoring at is not needed; at is reloaded below.
+    Pop(holder_reg);  // Restore holder.
+    // Restore at to holder's context.
+    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  }
+
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+  int token_offset = Context::kHeaderSize +
+                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+  lw(scratch, FieldMemOperand(scratch, token_offset));
+  lw(at, FieldMemOperand(at, token_offset));
+  Branch(miss, ne, scratch, Operand(at));
+
+  bind(&same_contexts);
 }
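
Stripped of the debug checks, the control flow above amounts to this rough
model (host-side pseudocode; the accessor names are invented for
illustration):

    // calling   = global context reached from the current frame's context.
    // receiving = context stored in the holder, a JSGlobalProxy.
    if (calling != receiving &&
        calling->security_token() != receiving->security_token()) {
      goto miss;  // Incompatible security tokens: fail the access check.
    }
    // Otherwise fall through: access is allowed.
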
 
 
 // ---------------------------------------------------------------------------
 // Instruction macros
 
-void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    add(rd, rs, rt.rm());
-  } else {
-    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
-      addi(rd, rs, rt.imm32_);
-    } else {
-      // li handles the relocation.
-      ASSERT(!rs.is(at));
-      li(at, rt);
-      add(rd, rs, at);
-    }
-  }
-}
-
-
 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     addu(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       addiu(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -161,6 +364,22 @@
 }
 
 
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    subu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      subu(rd, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     mul(rd, rs, rt.rm());
@@ -225,7 +444,7 @@
   if (rt.is_reg()) {
     and_(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       andi(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -241,7 +460,7 @@
   if (rt.is_reg()) {
     or_(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       ori(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -257,7 +476,7 @@
   if (rt.is_reg()) {
     xor_(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       xori(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -285,7 +504,7 @@
   if (rt.is_reg()) {
     slt(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       slti(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -301,7 +520,7 @@
   if (rt.is_reg()) {
     sltu(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+    // The sltiu immediate is sign-extended, so is_int16 is the right guard.
+    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       sltiu(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -313,31 +532,51 @@
 }
 
 
-//------------Pseudo-instructions-------------
-
-void MacroAssembler::movn(Register rd, Register rt) {
-  addiu(at, zero_reg, -1);  // Fill at with ones.
-  xor_(rd, rt, at);
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+  if (mips32r2) {
+    if (rt.is_reg()) {
+      rotrv(rd, rs, rt.rm());
+    } else {
+      rotr(rd, rs, rt.imm32_);
+    }
+  } else {
+    if (rt.is_reg()) {
+      subu(at, zero_reg, rt.rm());
+      sllv(at, rs, at);
+      srlv(rd, rs, rt.rm());
+      or_(rd, rd, at);
+    } else {
+      if (rt.imm32_ == 0) {
+        srl(rd, rs, 0);
+      } else {
+        srl(at, rs, rt.imm32_);
+        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
+        or_(rd, rd, at);
+      }
+    }
+  }
 }
 
 
+//------------Pseudo-instructions-------------
+
 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
   ASSERT(!j.is_reg());
-
-  if (!MustUseAt(j.rmode_) && !gen2instr) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (!MustUseReg(j.rmode_) && !gen2instr) {
     // Normal load of an immediate value which does not need Relocation Info.
     if (is_int16(j.imm32_)) {
       addiu(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & HIMask)) {
+    } else if (!(j.imm32_ & kHiMask)) {
       ori(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & LOMask)) {
-      lui(rd, (HIMask & j.imm32_) >> 16);
+    } else if (!(j.imm32_ & kImm16Mask)) {
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
     } else {
-      lui(rd, (HIMask & j.imm32_) >> 16);
-      ori(rd, rd, (LOMask & j.imm32_));
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+      ori(rd, rd, (j.imm32_ & kImm16Mask));
     }
-  } else if (MustUseAt(j.rmode_) || gen2instr) {
-    if (MustUseAt(j.rmode_)) {
+  } else if (MustUseReg(j.rmode_) || gen2instr) {
+    if (MustUseReg(j.rmode_)) {
       RecordRelocInfo(j.rmode_, j.imm32_);
     }
     // We need always the same number of instructions as we may need to patch
@@ -345,15 +584,15 @@
     if (is_int16(j.imm32_)) {
       nop();
       addiu(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & HIMask)) {
+    } else if (!(j.imm32_ & kHiMask)) {
       nop();
       ori(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & LOMask)) {
+    } else if (!(j.imm32_ & kImm16Mask)) {
       nop();
-      lui(rd, (HIMask & j.imm32_) >> 16);
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
     } else {
-      lui(rd, (HIMask & j.imm32_) >> 16);
-      ori(rd, rd, (LOMask & j.imm32_));
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+      ori(rd, rd, (j.imm32_ & kImm16Mask));
     }
   }
 }
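
The case analysis in li() picks the shortest encoding for a plain immediate,
and always emits exactly two instructions when the value carries relocation
info (or gen2instr is set) so the constant can later be patched in place. The
selection, modeled on the host:

    #include <stdint.h>

    // Instructions li() emits for a plain, non-relocated 32-bit immediate.
    int InstructionsForImmediate(int32_t imm) {
      uint32_t bits = static_cast<uint32_t>(imm);
      if (imm >= -32768 && imm <= 32767) return 1;  // addiu rd, zero_reg, imm
      if ((bits & 0xffff0000u) == 0) return 1;      // ori rd, zero_reg, imm
      if ((bits & 0x0000ffffu) == 0) return 1;      // lui rd, imm >> 16
      return 2;                                     // lui + ori
    }
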
@@ -417,230 +656,772 @@
 }
 
 
-// Emulated condtional branches do not emit a nop in the branch delay slot.
+void MacroAssembler::Ext(Register rt,
+                         Register rs,
+                         uint16_t pos,
+                         uint16_t size) {
+  ASSERT(pos < 32);
+  ASSERT(pos + size < 32);
 
-// Trashes the at register if no scratch register is provided.
-void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
-                            const Operand& rt, Register scratch) {
+  if (mips32r2) {
+    ext_(rt, rs, pos, size);
+  } else {
+    // Move rs to rt and shift it left then right to get the
+    // desired bitfield on the right side and zeroes on the left.
+    sll(rt, rs, 32 - (pos + size));
+    srl(rt, rt, 32 - size);
+  }
+}
+
+
+void MacroAssembler::Ins(Register rt,
+                         Register rs,
+                         uint16_t pos,
+                         uint16_t size) {
+  ASSERT(pos < 32);
+  ASSERT(pos + size < 32);
+
+  if (mips32r2) {
+    ins_(rt, rs, pos, size);
+  } else {
+    ASSERT(!rt.is(t8) && !rs.is(t8));
+
+    srl(t8, rt, pos + size);
+    // The left chunk from rt that needs to
+    // be saved is on the right side of t8.
+    sll(at, t8, pos + size);
+    // The 'at' register now contains the left chunk on
+    // the left (proper position) and zeroes.
+    sll(t8, rt, 32 - pos);
+    // t8 now contains the right chunk on the left and zeroes.
+    srl(t8, t8, 32 - pos);
+    // t8 now contains the right chunk on
+    // the right (proper position) and zeroes.
+    or_(rt, at, t8);
+    // rt now contains the left and right chunks from the original rt
+    // in their proper position and zeroes in the middle.
+    sll(t8, rs, 32 - size);
+    // t8 now contains the chunk from rs on the left and zeroes.
+    srl(t8, t8, 32 - size - pos);
+    // t8 now contains the original chunk from rs in
+    // the middle (proper position).
+    or_(rt, rt, t8);
+    // rt now contains the result of the ins instruction in R2 mode.
+  }
+}
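
Both shift sequences reproduce the R2 bitfield instructions. As plain C++ (a
sketch; pos and size stay within the asserted ranges, and the ins variant is
written with a mask, which is equivalent to the chunk shuffling above):

    #include <stdint.h>

    // ext: 'size' bits of rs starting at bit 'pos', right-aligned.
    uint32_t Ext(uint32_t rs, unsigned pos, unsigned size) {
      return (rs << (32 - (pos + size))) >> (32 - size);
    }

    // ins: replace bits [pos, pos + size) of rt with the low bits of rs.
    uint32_t Ins(uint32_t rt, uint32_t rs, unsigned pos, unsigned size) {
      uint32_t mask = ((1u << size) - 1u) << pos;
      return (rt & ~mask) | ((rs << pos) & mask);
    }
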
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
+  // Move the data from fs to t4.
+  mfc1(t4, fs);
+  Cvt_d_uw(fd, t4);
+}
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+  // Convert rs to a FP value in fd (and fd + 1).
+  // We do this by converting rs minus the MSB to avoid sign conversion,
+  // then adding 2^31-1 and 1 to the result.
+
+  ASSERT(!fd.is(f20));
+  ASSERT(!rs.is(t9));
+  ASSERT(!rs.is(t8));
+
+  // Save rs's MSB to t8.
+  And(t8, rs, 0x80000000);
+  // Remove rs's MSB.
+  And(t9, rs, 0x7FFFFFFF);
+  // Move t9 to fd.
+  mtc1(t9, fd);
+
+  // Convert fd to a real FP value.
+  cvt_d_w(fd, fd);
+
+  Label conversion_done;
+
+  // If rs's MSB was 0, it's done.
+  // Otherwise we need to add that to the FP register.
+  Branch(&conversion_done, eq, t8, Operand(zero_reg));
+
+  // First load 2^31 - 1 into f20.
+  Or(t9, zero_reg, 0x7FFFFFFF);
+  mtc1(t9, f20);
+
+  // Convert it to FP and add it to fd.
+  cvt_d_w(f20, f20);
+  add_d(fd, fd, f20);
+  // Now add 1.
+  Or(t9, zero_reg, 1);
+  mtc1(t9, f20);
+
+  cvt_d_w(f20, f20);
+  add_d(fd, fd, f20);
+  bind(&conversion_done);
+}
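
mtc1 plus cvt_d_w interprets the GPR as a signed int32, so 2^31 cannot be
materialized directly; the MSB is stripped before converting, and 2^31 is
added back as the two positive constants (2^31 - 1) and 1. A host-side model
of the result:

    #include <stdint.h>

    double CvtDUw(uint32_t x) {
      double d = static_cast<int32_t>(x & 0x7fffffffu);  // MSB stripped: >= 0.
      if (x & 0x80000000u) {
        d += 2147483647.0;  // 2^31 - 1 ...
        d += 1.0;           // ... plus 1 restores the stripped 2^31.
      }
      return d;
    }
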
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
+  Trunc_uw_d(fs, t4);
+  mtc1(t4, fd);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
+  ASSERT(!fd.is(f22));
+  ASSERT(!rs.is(t6));
+
+  // Load 2^31 into f22.
+  Or(t6, zero_reg, 0x80000000);
+  Cvt_d_uw(f22, t6);
+
+  // Test if f22 > fd.
+  c(OLT, D, fd, f22);
+
+  Label simple_convert;
+  // If fd < 2^31 we can convert it normally.
+  bc1t(&simple_convert);
+
+  // First we subtract 2^31 from fd, then trunc it to rs
+  // and add 2^31 to rs.
+
+  sub_d(f22, fd, f22);
+  trunc_w_d(f22, f22);
+  mfc1(rs, f22);
+  or_(rs, rs, t6);
+
+  Label done;
+  Branch(&done);
+  // Simple conversion.
+  bind(&simple_convert);
+  trunc_w_d(f22, fd);
+  mfc1(rs, f22);
+
+  bind(&done);
+}
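
The truncation handles the same asymmetry in the other direction: trunc_w_d
produces a signed int32, so values of 2^31 and above are reduced by 2^31
before converting and the top bit is or-ed back in. A host-side model
(assuming d lies in [0, 2^32)):

    #include <stdint.h>

    uint32_t TruncUwD(double d) {
      if (d < 2147483648.0) {  // Fits in a signed int32: convert directly.
        return static_cast<uint32_t>(static_cast<int32_t>(d));
      }
      return static_cast<uint32_t>(static_cast<int32_t>(d - 2147483648.0)) |
             0x80000000u;      // Fold the subtracted 2^31 back in.
    }
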
+
+
+// Tries to get a signed int32 out of a double precision floating point heap
+// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
+// 32-bit signed integer range.
+// This method implementation differs from the ARM version for performance
+// reasons.
+void MacroAssembler::ConvertToInt32(Register source,
+                                    Register dest,
+                                    Register scratch,
+                                    Register scratch2,
+                                    FPURegister double_scratch,
+                                    Label *not_int32) {
+  Label right_exponent, done;
+  // Get exponent word (ENDIAN issues).
+  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+  // Get exponent alone in scratch2.
+  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+  // Load dest with zero.  We use this either for the final shift or
+  // for the answer.
+  mov(dest, zero_reg);
+  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
+  // the exponent that we are fastest at and also the highest exponent we can
+  // handle here.
+  const uint32_t non_smi_exponent =
+      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
+  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
+  // If the exponent is higher than that then go to not_int32 case.  This
+  // catches numbers that don't fit in a signed int32, infinities and NaNs.
+  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
+
+  // We know the exponent is smaller than 30 (biased).  If it is less than
+  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+  // it rounds to zero.
+  const uint32_t zero_exponent =
+      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+  Subu(scratch2, scratch2, Operand(zero_exponent));
+  // Dest already has a Smi zero.
+  Branch(&done, lt, scratch2, Operand(zero_reg));
+  if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+    // We have a shifted exponent between 0 and 30 in scratch2.
+    srl(dest, scratch2, HeapNumber::kExponentShift);
+    // We now have the exponent in dest.  Subtract from 30 to get
+    // how much to shift down.
+    li(at, Operand(30));
+    subu(dest, at, dest);
+  }
+  bind(&right_exponent);
+  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // MIPS FPU instructions implementing double precision to integer
+    // conversion using round to zero. Since the FP value was qualified
+    // above, the resulting integer should be a legal int32.
+    // The original 'Exponent' word is still in scratch.
+    lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+    mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
+    trunc_w_d(double_scratch, double_scratch);
+    mfc1(dest, double_scratch);
+  } else {
+    // On entry, dest has final downshift, scratch has original sign/exp/mant.
+    // Save sign bit in top bit of dest.
+    And(scratch2, scratch, Operand(0x80000000));
+    Or(dest, dest, Operand(scratch2));
+    // Put back the implicit 1, just above mantissa field.
+    Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
+
+    // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We just orred in the implicit bit so that took care of one and
+    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+    // distance. But we want to clear the sign-bit so shift one more bit
+    // left, then shift right one bit.
+    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+    sll(scratch, scratch, shift_distance + 1);
+    srl(scratch, scratch, 1);
+
+    // Get the second half of the double. For some exponents we don't
+    // actually need this because the bits get shifted out again, but
+    // it's probably slower to test than just to do it.
+    lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+    // Extract the top 10 bits of the mantissa word, and insert them as the
+    // bottom 10 bits of scratch.
+    // The width of the field here is the same as the shift amount above.
+    const int field_width = shift_distance;
+    Ext(scratch2, scratch2, 32 - shift_distance, field_width);
+    Ins(scratch, scratch2, 0, field_width);
+    // Move down according to the exponent.
+    srlv(scratch, scratch, dest);
+    // Prepare the negative version of our integer.
+    subu(scratch2, zero_reg, scratch);
+    // Trick to check sign bit (msb) held in dest, count leading zero.
+    // 0 indicates negative, save negative version with conditional move.
+    clz(dest, dest);
+    movz(scratch, scratch2, dest);
+    mov(dest, scratch);
+  }
+  bind(&done);
+}
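
The screening at the top of ConvertToInt32 is plain IEEE-754 arithmetic on
the exponent word of the double. A host-side model of the three-way split
(the 0x7ff00000 mask, shift of 20 and bias of 1023 are the standard double
layout, not V8-specific values):

    #include <stdint.h>

    enum Int32Path { kFastExponent, kRoundsToZero, kNotInt32 };

    Int32Path Classify(uint32_t exponent_word) {
      const uint32_t kExponentMask = 0x7ff00000u;
      const uint32_t kNonSmiExponent = (1023u + 30u) << 20;  // 1.x * 2^30.
      uint32_t exp = exponent_word & kExponentMask;
      if (exp == kNonSmiExponent) return kFastExponent;  // Largest legal case.
      if (exp > kNonSmiExponent) return kNotInt32;  // Too big, infinity, NaN.
      if (exp < (1023u << 20)) return kRoundsToZero;  // Magnitude below 1.0.
      return kFastExponent;  // Smaller exponents: shift distance computed.
    }
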
+
+
+// Emulated conditional branches do not emit a nop in the branch delay slot.
+//
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
+    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
+    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+
+
+void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+  b(offset);
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
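
PROTECT pads the architectural branch delay slot with a nop; USE_DELAY_SLOT
leaves it to the caller to schedule a useful instruction there. Assumed usage
(the enum values come from the MIPS assembler headers; the surrounding
instructions are purely illustrative):

    masm->Branch(16, PROTECT);         // Emits: b 16; nop.
    masm->Branch(16, USE_DELAY_SLOT);  // Emits: b 16.
    masm->mov(v0, a0);                 // Caller fills the delay slot.
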
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+                            const Operand& rt,
+                            BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+  ASSERT(!rs.is(zero_reg));
   Register r2 = no_reg;
+  Register scratch = at;
+
   if (rt.is_reg()) {
     // We don't want any other register but scratch clobbered.
     ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
     r2 = rt.rm_;
-  } else if (cond != cc_always) {
-    // We don't want any other register but scratch clobbered.
-    ASSERT(!scratch.is(rs));
-    r2 = scratch;
-    li(r2, rt);
-  }
-
-  switch (cond) {
-    case cc_always:
-      b(offset);
-      break;
-    case eq:
-      beq(rs, r2, offset);
-      break;
-    case ne:
-      bne(rs, r2, offset);
-      break;
-
+    switch (cond) {
+      case cc_always:
+        b(offset);
+        break;
+      case eq:
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        bne(rs, r2, offset);
+        break;
       // Signed comparison
-    case greater:
-      slt(scratch, r2, rs);
-      bne(scratch, zero_reg, offset);
-      break;
-    case greater_equal:
-      slt(scratch, rs, r2);
-      beq(scratch, zero_reg, offset);
-      break;
-    case less:
-      slt(scratch, rs, r2);
-      bne(scratch, zero_reg, offset);
-      break;
-    case less_equal:
-      slt(scratch, r2, rs);
-      beq(scratch, zero_reg, offset);
-      break;
-
+      case greater:
+        if (r2.is(zero_reg)) {
+          bgtz(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (r2.is(zero_reg)) {
+          bgez(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (r2.is(zero_reg)) {
+          bltz(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (r2.is(zero_reg)) {
+          blez(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
       // Unsigned comparison.
-    case Ugreater:
-      sltu(scratch, r2, rs);
-      bne(scratch, zero_reg, offset);
-      break;
-    case Ugreater_equal:
-      sltu(scratch, rs, r2);
-      beq(scratch, zero_reg, offset);
-      break;
-    case Uless:
-      sltu(scratch, rs, r2);
-      bne(scratch, zero_reg, offset);
-      break;
-    case Uless_equal:
-      sltu(scratch, r2, rs);
-      beq(scratch, zero_reg, offset);
-      break;
-
-    default:
-      UNREACHABLE();
+      case Ugreater:
+        if (r2.is(zero_reg)) {
+          bgtz(rs, offset);
+        } else {
+          sltu(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (r2.is(zero_reg)) {
+          bgez(rs, offset);
+        } else {
+          sltu(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (r2.is(zero_reg)) {
+          b(offset);
+        } else {
+          sltu(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (r2.is(zero_reg)) {
+          b(offset);
+        } else {
+          sltu(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    // The immediate operand cannot be encoded in the branch itself, so it is
+    // materialized in the scratch register only for the cases that need it.
+    switch (cond) {
+      case cc_always:
+        b(offset);
+        break;
+      case eq:
+        // We don't want any other register but scratch clobbered.
+        ASSERT(!scratch.is(rs));
+        r2 = scratch;
+        li(r2, rt);
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        // We don't want any other register but scratch clobbered.
+        ASSERT(!scratch.is(rs));
+        r2 = scratch;
+        li(r2, rt);
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison
+      case greater:
+        if (rt.imm32_ == 0) {
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (rt.imm32_ == 0) {
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
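+          // slti encodes its signed 16-bit immediate directly, avoiding the
+          // li into the scratch register.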
+          slti(scratch, rs, rt.imm32_);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (rt.imm32_ == 0) {
+          bltz(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          slti(scratch, rs, rt.imm32_);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (rt.imm32_ == 0) {
+          blez(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (rt.imm32_ == 0) {
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (rt.imm32_ == 0) {
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
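+          // sltiu sign-extends its 16-bit immediate before the unsigned
+          // compare, matching what li + sltu would produce for these values.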
+          sltiu(scratch, rs, rt.imm32_);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (rt.imm32_ == 0) {
+          b(offset);
+        } else if (is_int16(rt.imm32_)) {
+          sltiu(scratch, rs, rt.imm32_);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (rt.imm32_ == 0) {
+          b(offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
-  // Emit a nop in the branch delay slot.
-  nop();
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
-void MacroAssembler::Branch(Condition cond,  Label* L, Register rs,
-                            const Operand& rt, Register scratch) {
-  Register r2 = no_reg;
-  if (rt.is_reg()) {
-    r2 = rt.rm_;
-  } else if (cond != cc_always) {
-    r2 = scratch;
-    li(r2, rt);
-  }
-
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
   // We use shifted_branch_offset as an argument for the branch instructions
   // so it is called just before generating the branch instruction, as needed.
 
-  switch (cond) {
-    case cc_always:
-      b(shifted_branch_offset(L, false));
-      break;
-    case eq:
-      beq(rs, r2, shifted_branch_offset(L, false));
-      break;
-    case ne:
-      bne(rs, r2, shifted_branch_offset(L, false));
-      break;
+  b(shifted_branch_offset(L, false));
 
-    // Signed comparison
-    case greater:
-      slt(scratch, r2, rs);
-      bne(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case greater_equal:
-      slt(scratch, rs, r2);
-      beq(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case less:
-      slt(scratch, rs, r2);
-      bne(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case less_equal:
-      slt(scratch, r2, rs);
-      beq(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-
-    // Unsigned comparison.
-    case Ugreater:
-      sltu(scratch, r2, rs);
-      bne(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case Ugreater_equal:
-      sltu(scratch, rs, r2);
-      beq(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case Uless:
-      sltu(scratch, rs, r2);
-      bne(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case Uless_equal:
-      sltu(scratch, r2, rs);
-      beq(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-
-    default:
-      UNREACHABLE();
-  }
-  // Emit a nop in the branch delay slot.
-  nop();
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
-// Trashes the at register if no scratch register is provided.
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+                            const Operand& rt,
+                            BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+
+  int32_t offset;
+  Register r2 = no_reg;
+  Register scratch = at;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+    // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching
+    // the target.
+    switch (cond) {
+      case cc_always:
+        offset = shifted_branch_offset(L, false);
+        b(offset);
+        break;
+      case eq:
+        offset = shifted_branch_offset(L, false);
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        offset = shifted_branch_offset(L, false);
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison
+      case greater:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bltz(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          blez(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else {
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          b(offset);
+        } else {
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          b(offset);
+        } else {
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching
+    // the target.
+    switch (cond) {
+      case cc_always:
+        offset = shifted_branch_offset(L, false);
+        b(offset);
+        break;
+      case eq:
+        r2 = scratch;
+        li(r2, rt);
+        offset = shifted_branch_offset(L, false);
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        r2 = scratch;
+        li(r2, rt);
+        offset = shifted_branch_offset(L, false);
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison
+      case greater:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          slti(scratch, rs, rt.imm32_);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bltz(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          slti(scratch, rs, rt.imm32_);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          blez(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          sltiu(scratch, rs, rt.imm32_);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          b(offset);
+        } else if (is_int16(rt.imm32_)) {
+          sltiu(scratch, rs, rt.imm32_);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          b(offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+  // Check that the offset actually fits in an int16_t.
+  ASSERT(is_int16(offset));
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
 // We need to use a bgezal or bltzal, but they can't be used directly with the
 // slt instructions. We could use sub or add instead but we would miss overflow
 // cases, so we keep slt and add an intermediate third instruction.
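+//
+// For example, for 'greater' the slt leaves 1 when rs > r2 and 0 otherwise;
+// the addiu maps this to 0 / -1, so bgezal links and branches exactly when
+// the original comparison held.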
-void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
-                                   const Operand& rt, Register scratch) {
-  Register r2 = no_reg;
-  if (rt.is_reg()) {
-    r2 = rt.rm_;
-  } else if (cond != cc_always) {
-    r2 = scratch;
-    li(r2, rt);
-  }
+void MacroAssembler::BranchAndLink(int16_t offset,
+                                   BranchDelaySlot bdslot) {
+  bal(offset);
 
-  switch (cond) {
-    case cc_always:
-      bal(offset);
-      break;
-    case eq:
-      bne(rs, r2, 2);
-      nop();
-      bal(offset);
-      break;
-    case ne:
-      beq(rs, r2, 2);
-      nop();
-      bal(offset);
-      break;
-
-    // Signed comparison
-    case greater:
-      slt(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      bgezal(scratch, offset);
-      break;
-    case greater_equal:
-      slt(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      bltzal(scratch, offset);
-      break;
-    case less:
-      slt(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      bgezal(scratch, offset);
-      break;
-    case less_equal:
-      slt(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      bltzal(scratch, offset);
-      break;
-
-    // Unsigned comparison.
-    case Ugreater:
-      sltu(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      bgezal(scratch, offset);
-      break;
-    case Ugreater_equal:
-      sltu(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      bltzal(scratch, offset);
-      break;
-    case Uless:
-      sltu(scratch, rs, r2);
-      addiu(scratch, scratch, -1);
-      bgezal(scratch, offset);
-      break;
-    case Uless_equal:
-      sltu(scratch, r2, rs);
-      addiu(scratch, scratch, -1);
-      bltzal(scratch, offset);
-      break;
-
-    default:
-      UNREACHABLE();
-  }
-  // Emit a nop in the branch delay slot.
-  nop();
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
-void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
-                                   const Operand& rt, Register scratch) {
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+                                   const Operand& rt,
+                                   BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
   Register r2 = no_reg;
+  Register scratch = at;
+
   if (rt.is_reg()) {
     r2 = rt.rm_;
   } else if (cond != cc_always) {
@@ -650,157 +1431,372 @@
 
   switch (cond) {
     case cc_always:
-      bal(shifted_branch_offset(L, false));
+      bal(offset);
       break;
     case eq:
       bne(rs, r2, 2);
       nop();
-      bal(shifted_branch_offset(L, false));
+      bal(offset);
       break;
     case ne:
       beq(rs, r2, 2);
       nop();
-      bal(shifted_branch_offset(L, false));
+      bal(offset);
       break;
 
     // Signed comparison
     case greater:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
-      bgezal(scratch, shifted_branch_offset(L, false));
+      bgezal(scratch, offset);
       break;
     case greater_equal:
       slt(scratch, rs, r2);
       addiu(scratch, scratch, -1);
-      bltzal(scratch, shifted_branch_offset(L, false));
+      bltzal(scratch, offset);
       break;
     case less:
       slt(scratch, rs, r2);
       addiu(scratch, scratch, -1);
-      bgezal(scratch, shifted_branch_offset(L, false));
+      bgezal(scratch, offset);
       break;
     case less_equal:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
-      bltzal(scratch, shifted_branch_offset(L, false));
+      bltzal(scratch, offset);
       break;
 
     // Unsigned comparison.
     case Ugreater:
       sltu(scratch, r2, rs);
       addiu(scratch, scratch, -1);
-      bgezal(scratch, shifted_branch_offset(L, false));
+      bgezal(scratch, offset);
       break;
     case Ugreater_equal:
       sltu(scratch, rs, r2);
       addiu(scratch, scratch, -1);
-      bltzal(scratch, shifted_branch_offset(L, false));
+      bltzal(scratch, offset);
       break;
     case Uless:
       sltu(scratch, rs, r2);
       addiu(scratch, scratch, -1);
-      bgezal(scratch, shifted_branch_offset(L, false));
+      bgezal(scratch, offset);
       break;
     case Uless_equal:
       sltu(scratch, r2, rs);
       addiu(scratch, scratch, -1);
-      bltzal(scratch, shifted_branch_offset(L, false));
+      bltzal(scratch, offset);
       break;
 
     default:
       UNREACHABLE();
   }
-  // Emit a nop in the branch delay slot.
-  nop();
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+  bal(shifted_branch_offset(L, false));
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+                                   const Operand& rt,
+                                   BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+
+  int32_t offset;
+  Register r2 = no_reg;
+  Register scratch = at;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  switch (cond) {
+    case cc_always:
+      offset = shifted_branch_offset(L, false);
+      bal(offset);
+      break;
+    case eq:
+      bne(rs, r2, 2);
+      nop();
+      offset = shifted_branch_offset(L, false);
+      bal(offset);
+      break;
+    case ne:
+      beq(rs, r2, 2);
+      nop();
+      offset = shifted_branch_offset(L, false);
+      bal(offset);
+      break;
+
+    // Signed comparison
+    case greater:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      offset = shifted_branch_offset(L, false);
+      bgezal(scratch, offset);
+      break;
+    case greater_equal:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      offset = shifted_branch_offset(L, false);
+      bltzal(scratch, offset);
+      break;
+    case less:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      offset = shifted_branch_offset(L, false);
+      bgezal(scratch, offset);
+      break;
+    case less_equal:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      offset = shifted_branch_offset(L, false);
+      bltzal(scratch, offset);
+      break;
+
+    // Unsigned comparison.
+    case Ugreater:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      offset = shifted_branch_offset(L, false);
+      bgezal(scratch, offset);
+      break;
+    case Ugreater_equal:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      offset = shifted_branch_offset(L, false);
+      bltzal(scratch, offset);
+      break;
+    case Uless:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      offset = shifted_branch_offset(L, false);
+      bgezal(scratch, offset);
+      break;
+    case Uless_equal:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      offset = shifted_branch_offset(L, false);
+      bltzal(scratch, offset);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // Check that the offset actually fits in an int16_t.
+  ASSERT(is_int16(offset));
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
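+  // The scope keeps the jump sequence contiguous: no trampoline pool can be
+  // emitted between the jump and its delay slot.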
+  if (target.is_reg()) {
+    jr(target.rm());
+  } else {
+    if (!MustUseReg(target.rmode_)) {
+      j(target.imm32_);
+    } else {
+      li(t9, target);
+      jr(t9);
+    }
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
 void MacroAssembler::Jump(const Operand& target,
-                          Condition cond, Register rs, const Operand& rt) {
+                          Condition cond, Register rs, const Operand& rt,
+                          BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  BRANCH_ARGS_CHECK(cond, rs, rt);
   if (target.is_reg()) {
     if (cond == cc_always) {
       jr(target.rm());
     } else {
-      Branch(NegateCondition(cond), 2, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
       jr(target.rm());
     }
-  } else {    // !target.is_reg()
-    if (!MustUseAt(target.rmode_)) {
+  } else {  // Not register target.
+    if (!MustUseReg(target.rmode_)) {
       if (cond == cc_always) {
         j(target.imm32_);
       } else {
-        Branch(NegateCondition(cond), 2, rs, rt);
+        Branch(2, NegateCondition(cond), rs, rt);
         j(target.imm32_);  // Will generate only one instruction.
       }
-    } else {  // MustUseAt(target)
-      li(at, target);
+    } else {  // MustUseReg(target)
+      li(t9, target);
       if (cond == cc_always) {
-        jr(at);
+        jr(t9);
       } else {
-        Branch(NegateCondition(cond), 2, rs, rt);
-        jr(at);  // Will generate only one instruction.
+        Branch(2, NegateCondition(cond), rs, rt);
+        jr(t9);  // Will generate only one instruction.
       }
     }
   }
-  // Emit a nop in the branch delay slot.
-  nop();
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
+// Note: To call gcc-compiled C code on MIPS, you must call through t9.
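+// (In the PIC o32 ABI the callee computes its global pointer from t9, so t9
+// must hold the entry address at the point of call.)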
+void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (target.is_reg()) {
+    jalr(target.rm());
+  } else {  // !target.is_reg()
+    if (!MustUseReg(target.rmode_)) {
+      jal(target.imm32_);
+    } else {  // MustUseReg(target)
+      li(t9, target);
+      jalr(t9);
+    }
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+// Note: To call gcc-compiled C code on MIPS, you must call through t9.
 void MacroAssembler::Call(const Operand& target,
-                          Condition cond, Register rs, const Operand& rt) {
+                          Condition cond, Register rs, const Operand& rt,
+                          BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  BRANCH_ARGS_CHECK(cond, rs, rt);
   if (target.is_reg()) {
     if (cond == cc_always) {
       jalr(target.rm());
     } else {
-      Branch(NegateCondition(cond), 2, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
       jalr(target.rm());
     }
   } else {    // !target.is_reg()
-    if (!MustUseAt(target.rmode_)) {
+    if (!MustUseReg(target.rmode_)) {
       if (cond == cc_always) {
         jal(target.imm32_);
       } else {
-        Branch(NegateCondition(cond), 2, rs, rt);
+        Branch(2, NegateCondition(cond), rs, rt);
         jal(target.imm32_);  // Will generate only one instruction.
       }
-    } else {  // MustUseAt(target)
-      li(at, target);
+    } else {  // MustUseReg(target)
+      li(t9, target);
       if (cond == cc_always) {
-        jalr(at);
+        jalr(t9);
       } else {
-        Branch(NegateCondition(cond), 2, rs, rt);
-        jalr(at);  // Will generate only one instruction.
+        Branch(2, NegateCondition(cond), rs, rt);
+        jalr(t9);  // Will generate only one instruction.
       }
     }
   }
-  // Emit a nop in the branch delay slot.
-  nop();
-}
-
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
-  UNIMPLEMENTED_MIPS();
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
-void MacroAssembler::Drop(int count, Condition cond) {
-  UNIMPLEMENTED_MIPS();
+void MacroAssembler::Drop(int count,
+                          Condition cond,
+                          Register reg,
+                          const Operand& op) {
+  if (count <= 0) {
+    return;
+  }
+
+  Label skip;
+
+  if (cond != al) {
+    Branch(&skip, NegateCondition(cond), reg, op);
+  }
+
+  if (count > 0) {
+    addiu(sp, sp, count * kPointerSize);
+  }
+
+  if (cond != al) {
+    bind(&skip);
+  }
+}
+
+
+void MacroAssembler::DropAndRet(int drop,
+                                Condition cond,
+                                Register r1,
+                                const Operand& r2) {
+  // This is a workaround to make sure only one branch instruction is
+  // generated. It relies on Drop and Ret not creating branches if
+  // cond == cc_always.
+  Label skip;
+  if (cond != cc_always) {
+    Branch(&skip, NegateCondition(cond), r1, r2);
+  }
+
+  Drop(drop);
+  Ret();
+
+  if (cond != cc_always) {
+    bind(&skip);
+  }
+}
+
+
+void MacroAssembler::Swap(Register reg1,
+                          Register reg2,
+                          Register scratch) {
+  if (scratch.is(no_reg)) {
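+    // XOR-swap: exchanges the registers without a temporary. Assumes reg1
+    // and reg2 do not alias, or both would end up zero.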
+    Xor(reg1, reg1, Operand(reg2));
+    Xor(reg2, reg2, Operand(reg1));
+    Xor(reg1, reg1, Operand(reg2));
+  } else {
+    mov(scratch, reg1);
+    mov(reg1, reg2);
+    mov(reg2, scratch);
+  }
 }
 
 
 void MacroAssembler::Call(Label* target) {
-  UNIMPLEMENTED_MIPS();
+  BranchAndLink(target);
+}
+
+
+void MacroAssembler::Move(Register dst, Register src) {
+  if (!dst.is(src)) {
+    mov(dst, src);
+  }
 }
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  // ---------------------------------------------------------------------------
-  // Debugger Support
 
-  void MacroAssembler::DebugBreak() {
-    UNIMPLEMENTED_MIPS();
-  }
-#endif
+void MacroAssembler::DebugBreak() {
+  ASSERT(allow_stub_calls());
+  mov(a0, zero_reg);
+  li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+  CEntryStub ces(1);
+  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
 // ---------------------------------------------------------------------------
@@ -822,7 +1818,7 @@
            && StackHandlerConstants::kPCOffset == 3 * kPointerSize
            && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
     // Save the current handler as the next handler.
-    LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+    li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
     lw(t1, MemOperand(t2));
 
     addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -848,7 +1844,7 @@
     li(t0, Operand(StackHandler::ENTRY));
 
     // Save the current handler as the next handler.
-    LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+    li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
     lw(t1, MemOperand(t2));
 
     addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -864,45 +1860,377 @@
 
 
 void MacroAssembler::PopTryHandler() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+  pop(a1);
+  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+  li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+  sw(a1, MemOperand(at));
 }
 
 
-
-// -----------------------------------------------------------------------------
-// Activation frames
-
-void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
-  Label extra_push, end;
-
-  andi(scratch, sp, 7);
-
-  // We check for args and receiver size on the stack, all of them word sized.
-  // We add one for sp, that we also want to store on the stack.
-  if (((arg_count + 1) % kPointerSizeLog2) == 0) {
-    Branch(ne, &extra_push, at, Operand(zero_reg));
-  } else {  // ((arg_count + 1) % 2) == 1
-    Branch(eq, &extra_push, at, Operand(zero_reg));
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  if (!FLAG_inline_new) {
+    if (FLAG_debug_code) {
+      // Trash the registers to simulate an allocation failure.
+      li(result, 0x7091);
+      li(scratch1, 0x7191);
+      li(scratch2, 0x7291);
+    }
+    jmp(gc_required);
+    return;
   }
 
-  // Save sp on the stack.
-  mov(scratch, sp);
-  Push(scratch);
-  b(&end);
+  ASSERT(!result.is(scratch1));
+  ASSERT(!result.is(scratch2));
+  ASSERT(!scratch1.is(scratch2));
+  ASSERT(!scratch1.is(t9));
+  ASSERT(!scratch2.is(t9));
+  ASSERT(!result.is(t9));
 
-  // Align before saving sp on the stack.
-  bind(&extra_push);
-  mov(scratch, sp);
-  addiu(sp, sp, -8);
-  sw(scratch, MemOperand(sp));
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
 
-  // The stack is aligned and sp is stored on the top.
-  bind(&end);
+  // Check relative positions of allocation top and limit addresses.
+  // ARM adds additional checks to make sure the ldm instruction can be
+  // used. On MIPS we don't have ldm so we don't need additional checks either.
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address(isolate());
+  intptr_t top   =
+      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+  intptr_t limit =
+      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+  ASSERT((limit - top) == kPointerSize);
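+  // Since limit sits exactly one word above top, both values can be loaded
+  // below from the same base register with small offsets.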
+
+  // Set up allocation top address and object size registers.
+  Register topaddr = scratch1;
+  Register obj_size_reg = scratch2;
+  li(topaddr, Operand(new_space_allocation_top));
+  li(obj_size_reg, Operand(object_size));
+
+  // This code stores a temporary value in t9.
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into t9.
+    lw(result, MemOperand(topaddr));
+    lw(t9, MemOperand(topaddr, kPointerSize));
+  } else {
+    if (FLAG_debug_code) {
+      // Assert that result actually contains top on entry. t9 is used
+      // immediately below, so this use of t9 does not cause a difference in
+      // register content between debug and release mode.
+      lw(t9, MemOperand(topaddr));
+      Check(eq, "Unexpected allocation top", result, Operand(t9));
+    }
+    // Load allocation limit into t9. Result already contains allocation top.
+    lw(t9, MemOperand(topaddr, limit - top));
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top.
+  Addu(scratch2, result, Operand(obj_size_reg));
+  Branch(gc_required, Ugreater, scratch2, Operand(t9));
+  sw(scratch2, MemOperand(topaddr));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    Addu(result, result, Operand(kHeapObjectTag));
+  }
 }
 
 
-void MacroAssembler::ReturnFromAlignedCall() {
-  lw(sp, MemOperand(sp));
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  if (!FLAG_inline_new) {
+    if (FLAG_debug_code) {
+      // Trash the registers to simulate an allocation failure.
+      li(result, 0x7091);
+      li(scratch1, 0x7191);
+      li(scratch2, 0x7291);
+    }
+    jmp(gc_required);
+    return;
+  }
+
+  ASSERT(!result.is(scratch1));
+  ASSERT(!result.is(scratch2));
+  ASSERT(!scratch1.is(scratch2));
+  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+
+  // Check relative positions of allocation top and limit addresses.
+  // ARM adds additional checks to make sure the ldm instruction can be
+  // used. On MIPS we don't have ldm so we don't need additional checks either.
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address(isolate());
+  intptr_t top   =
+      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+  intptr_t limit =
+      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+  ASSERT((limit - top) == kPointerSize);
+
+  // Set up allocation top address and object size registers.
+  Register topaddr = scratch1;
+  li(topaddr, Operand(new_space_allocation_top));
+
+  // This code stores a temporary value in t9.
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into t9.
+    lw(result, MemOperand(topaddr));
+    lw(t9, MemOperand(topaddr, kPointerSize));
+  } else {
+    if (FLAG_debug_code) {
+      // Assert that result actually contains top on entry. t9 is used
+      // immediately below, so this use of t9 does not cause a difference in
+      // register content between debug and release mode.
+      lw(t9, MemOperand(topaddr));
+      Check(eq, "Unexpected allocation top", result, Operand(t9));
+    }
+    // Load allocation limit into t9. Result already contains allocation top.
+    lw(t9, MemOperand(topaddr, limit - top));
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top. Object size may be in words so a shift is
+  // required to get the number of bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    sll(scratch2, object_size, kPointerSizeLog2);
+    Addu(scratch2, result, scratch2);
+  } else {
+    Addu(scratch2, result, Operand(object_size));
+  }
+  Branch(gc_required, Ugreater, scratch2, Operand(t9));
+
+  // Update allocation top. scratch2 holds the new top.
+  if (FLAG_debug_code) {
+    And(t9, scratch2, Operand(kObjectAlignmentMask));
+    Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
+  }
+  sw(scratch2, MemOperand(topaddr));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    Addu(result, result, Operand(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+                                              Register scratch) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+
+  // Make sure the object has no tag before resetting top.
+  And(object, object, Operand(~kHeapObjectTagMask));
+#ifdef DEBUG
+  // Check that the object un-allocated is below the current top.
+  li(scratch, Operand(new_space_allocation_top));
+  lw(scratch, MemOperand(scratch));
+  Check(less, "Undo allocation of non allocated memory",
+      object, Operand(scratch));
+#endif
+  // Write the address of the object to un-allocate as the current top.
+  li(scratch, Operand(new_space_allocation_top));
+  sw(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+                                           Register length,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  sll(scratch1, length, 1);  // Length in bytes, not chars.
+  addiu(scratch1, scratch1,
+        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
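+  // E.g. with 8-byte object alignment, a 5-character string needs 10 payload
+  // bytes; adding the header plus kObjectAlignmentMask and masking rounds
+  // the total up to the next multiple of 8.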
+
+  // Allocate two-byte string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result,
+                      length,
+                      Heap::kStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+                                         Register length,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  ASSERT(kCharSize == 1);
+  addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+  // Allocate ASCII string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result,
+                      length,
+                      Heap::kAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  InitializeNewString(result,
+                      length,
+                      Heap::kConsStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+                                             Register length,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  InitializeNewString(result,
+                      length,
+                      Heap::kConsAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register heap_number_map,
+                                        Label* need_gc) {
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  AllocateInNewSpace(HeapNumber::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     need_gc,
+                     TAG_OBJECT);
+
+  // Store heap number map in the allocated object.
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+                                                 FPURegister value,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+  AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
+  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Copies a fixed number of fields of heap objects from src to dst.
+void MacroAssembler::CopyFields(Register dst,
+                                Register src,
+                                RegList temps,
+                                int field_count) {
+  ASSERT((temps & dst.bit()) == 0);
+  ASSERT((temps & src.bit()) == 0);
+  // Primitive implementation using only one temporary register.
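+  // E.g. CopyFields(a1, a0, t5.bit(), 3) would copy the first three fields
+  // of the object in a0 into the object in a1, using t5 as the temporary.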
+
+  Register tmp = no_reg;
+  // Find a temp register in temps list.
+  for (int i = 0; i < kNumRegisters; i++) {
+    if ((temps & (1 << i)) != 0) {
+      tmp.code_ = i;
+      break;
+    }
+  }
+  ASSERT(!tmp.is(no_reg));
+
+  for (int i = 0; i < field_count; i++) {
+    lw(tmp, FieldMemOperand(src, i * kPointerSize));
+    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
+  }
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    JumpIfSmi(obj, fail);
+  }
+  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  li(at, Operand(map));
+  Branch(fail, ne, scratch, Operand(at));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Heap::RootListIndex index,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    JumpIfSmi(obj, fail);
+  }
+  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  LoadRoot(at, index);
+  Branch(fail, ne, scratch, Operand(at));
 }
 
 
@@ -914,7 +2242,8 @@
                                     Handle<Code> code_constant,
                                     Register code_reg,
                                     Label* done,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   bool definitely_matches = false;
   Label regular_invoke;
 
@@ -949,11 +2278,13 @@
         li(a2, Operand(expected.immediate()));
       }
     }
-  } else if (actual.is_immediate()) {
-    Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
-    li(a0, Operand(actual.immediate()));
   } else {
-    Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
+    if (actual.is_immediate()) {
+      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
+      li(a0, Operand(actual.immediate()));
+    } else {
+      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
+    }
   }
 
   if (!definitely_matches) {
@@ -962,25 +2293,29 @@
       addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
     }
 
-    ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
+    Handle<Code> adaptor =
+        isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (flag == CALL_FUNCTION) {
-      CallBuiltin(adaptor);
-      b(done);
-      nop();
+      Call(adaptor, RelocInfo::CODE_TARGET);
+      if (post_call_generator != NULL) post_call_generator->Generate();
+      jmp(done);
     } else {
-      JumpToBuiltin(adaptor);
+      Jump(adaptor, RelocInfo::CODE_TARGET);
     }
     bind(&regular_invoke);
   }
 }
 
+
 void MacroAssembler::InvokeCode(Register code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                PostCallGenerator* post_call_generator) {
   Label done;
 
-  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+                 post_call_generator);
   if (flag == CALL_FUNCTION) {
     Call(code);
   } else {
@@ -1014,7 +2349,8 @@
 
 void MacroAssembler::InvokeFunction(Register function,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   // Contract with called JS functions requires that function is passed in a1.
   ASSERT(function.is(a1));
   Register expected_reg = a2;
@@ -1025,68 +2361,120 @@
   lw(expected_reg,
       FieldMemOperand(code_reg,
                       SharedFunctionInfo::kFormalParameterCountOffset));
-  lw(code_reg,
-      MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
-  addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+  sra(expected_reg, expected_reg, kSmiTagSize);
+  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(expected_reg);
-  InvokeCode(code_reg, expected, actual, flag);
+  InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+}
+
+
+void MacroAssembler::InvokeFunction(JSFunction* function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(function->is_compiled());
+
+  // Get the function and setup the context.
+  li(a1, Operand(Handle<JSFunction>(function)));
+  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Invoke the cached code.
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  if (V8::UseCrankshaft()) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  }
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+                                          Register map,
+                                          Register scratch,
+                                          Label* fail) {
+  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+  IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+                                            Register scratch,
+                                            Label* fail) {
+  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
+  Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+                                          Register scratch,
+                                          Label* fail) {
+  ASSERT(kNotStringTag != 0);
+
+  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  And(scratch, scratch, Operand(kIsNotStringMask));
+  Branch(fail, ne, scratch, Operand(zero_reg));
 }
 
 
 // ---------------------------------------------------------------------------
 // Support functions.
 
-  void MacroAssembler::GetObjectType(Register function,
-                                     Register map,
-                                     Register type_reg) {
-    lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
-    lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  }
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+                                             Register result,
+                                             Register scratch,
+                                             Label* miss) {
+  // Check that the receiver isn't a smi.
+  JumpIfSmi(function, miss);
+
+  // Check that the function really is a function.  Load map into result reg.
+  GetObjectType(function, result, scratch);
+  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+  Branch(&non_instance, ne, scratch, Operand(zero_reg));
+
+  // Get the prototype or initial map from the function.
+  lw(result,
+     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
+  Branch(miss, eq, result, Operand(t8));
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  GetObjectType(result, scratch, scratch);
+  Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+  // Get the prototype from the initial map.
+  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+  jmp(&done);
+
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  bind(&non_instance);
+  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  bind(&done);
+}
 
 
-  void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
-    // Load builtin address.
-    LoadExternalReference(t9, builtin_entry);
-    lw(t9, MemOperand(t9));  // Deref address.
-    addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
-    // Call and allocate arguments slots.
-    jalr(t9);
-    // Use the branch delay slot to allocated argument slots.
-    addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
-    addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
-  }
-
-
-  void MacroAssembler::CallBuiltin(Register target) {
-    // Target already holds target address.
-    // Call and allocate arguments slots.
-    jalr(target);
-    // Use the branch delay slot to allocated argument slots.
-    addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
-    addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
-  }
-
-
-  void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
-    // Load builtin address.
-    LoadExternalReference(t9, builtin_entry);
-    lw(t9, MemOperand(t9));  // Deref address.
-    addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
-    // Call and allocate arguments slots.
-    jr(t9);
-    // Use the branch delay slot to allocated argument slots.
-    addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
-  }
-
-
-  void MacroAssembler::JumpToBuiltin(Register target) {
-    // t9 already holds target address.
-    // Call and allocate arguments slots.
-    jr(t9);
-    // Use the branch delay slot to allocated argument slots.
-    addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
-  }
+void MacroAssembler::GetObjectType(Register object,
+                                   Register map,
+                                   Register type_reg) {
+  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
 
 
 // -----------------------------------------------------------------------------
@@ -1099,8 +2487,9 @@
 }
 
 
-void MacroAssembler::StubReturn(int argc) {
-  UNIMPLEMENTED_MIPS();
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
@@ -1112,7 +2501,71 @@
 }
 
 
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::IndexFromHash(Register hash,
+                                   Register index) {
+  // If the hash field contains an array index pick it out. The assert checks
+  // that the constants for the maximum number of digits for an array index
+  // cached in the hash field and the number of bits reserved for it does not
+  // conflict.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << String::kArrayIndexValueBits));
+  // We want the smi-tagged index in the index register. Ext extracts the
+  // kArrayIndexValueBits-wide field above the low kHashShift bits.
+  STATIC_ASSERT(kSmiTag == 0);
+  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+  sll(index, hash, kSmiTagSize);
+}
+
+
+void MacroAssembler::ObjectToDoubleFPURegister(Register object,
+                                               FPURegister result,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Register heap_number_map,
+                                               Label* not_number,
+                                               ObjectToDoubleFlags flags) {
+  Label done;
+  if ((flags & OBJECT_NOT_SMI) == 0) {
+    Label not_smi;
+    JumpIfNotSmi(object, &not_smi);
+    // Remove smi tag and convert to double.
+    sra(scratch1, object, kSmiTagSize);
+    mtc1(scratch1, result);
+    cvt_d_w(result, result);
+    Branch(&done);
+    bind(&not_smi);
+  }
+  // Check for heap number and load double value from it.
+  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+  Branch(not_number, ne, scratch1, Operand(heap_number_map));
+
+  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
+    // If exponent is all ones the number is either a NaN or +/-Infinity.
+    Register exponent = scratch1;
+    Register mask_reg = scratch2;
+    lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    li(mask_reg, HeapNumber::kExponentMask);
+
+    And(exponent, exponent, mask_reg);
+    Branch(not_number, eq, exponent, Operand(mask_reg));
+  }
+  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
+  bind(&done);
+}
+
+
+void MacroAssembler::SmiToDoubleFPURegister(Register smi,
+                                            FPURegister value,
+                                            Register scratch1) {
+  sra(scratch1, smi, kSmiTagSize);
+  mtc1(scratch1, value);
+  cvt_d_w(value, value);
+}
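+// Sketch of the conversion above: sra strips the smi tag, mtc1 moves the
+// untagged 32-bit integer into the FPU register, and cvt.d.w widens it to
+// a double. E.g. the smi encoding of 5 (5 << kSmiTagSize) yields 5.0.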
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+                                 int num_arguments) {
   // All parameters are on the stack. v0 has the return value after call.
 
   // If the expected number of arguments of the runtime function is
@@ -1128,69 +2581,129 @@
   // should remove this need and make the runtime routine entry code
   // smarter.
   li(a0, num_arguments);
-  LoadExternalReference(a1, ExternalReference(f));
+  li(a1, Operand(ExternalReference(f, isolate())));
   CEntryStub stub(1);
   CallStub(&stub);
 }
 
 
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+  const Runtime::Function* function = Runtime::FunctionForId(id);
+  li(a0, Operand(function->nargs));
+  li(a1, Operand(ExternalReference(function, isolate())));
+  CEntryStub stub(1);
+  stub.SaveDoubles();
+  CallStub(&stub);
+}
+
+
 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
   CallRuntime(Runtime::FunctionForId(fid), num_arguments);
 }
 
 
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+                                           int num_arguments) {
+  li(a0, Operand(num_arguments));
+  li(a1, Operand(ext));
+
+  CEntryStub stub(1);
+  CallStub(&stub);
+}
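+// Hedged note on the convention shared by the runtime-call helpers above:
+// a0 carries the argument count, a1 the entry address (an
+// ExternalReference), the arguments themselves are already on the stack,
+// and CEntryStub leaves the result in v0.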
+
+
 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                                int num_arguments,
                                                int result_size) {
-  UNIMPLEMENTED_MIPS();
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  li(a0, Operand(num_arguments));
+  JumpToExternalReference(ext);
 }
 
 
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
-  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+  TailCallExternalReference(ExternalReference(fid, isolate()),
+                            num_arguments,
+                            result_size);
 }
 
 
 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
-                                            bool* resolved) {
-  UNIMPLEMENTED_MIPS();
-  return Handle<Code>(reinterpret_cast<Code*>(NULL));   // UNIMPLEMENTED RETURN
+  li(a1, Operand(builtin));
+  CEntryStub stub(1);
+  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeJSFlags flags) {
-  UNIMPLEMENTED_MIPS();
+                                   InvokeJSFlags flags,
+                                   PostCallGenerator* post_call_generator) {
+  GetBuiltinEntry(t9, id);
+  if (flags == CALL_JS) {
+    Call(t9);
+    if (post_call_generator != NULL) post_call_generator->Generate();
+  } else {
+    ASSERT(flags == JUMP_JS);
+    Jump(t9);
+  }
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+                                        Builtins::JavaScript id) {
+  // Load the builtins object into target register.
+  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+  // Load the JavaScript builtin function from the builtins object.
+  lw(target, FieldMemOperand(target,
+                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
 }
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!target.is(a1));
+  GetBuiltinFunction(a1, id);
+  // Load the code entry point from the builtins object.
+  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
 }
 
 
 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
-  UNIMPLEMENTED_MIPS();
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    li(scratch1, Operand(value));
+    li(scratch2, Operand(ExternalReference(counter)));
+    sw(scratch1, MemOperand(scratch2));
+  }
 }
 
 
 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                       Register scratch1, Register scratch2) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    li(scratch2, Operand(ExternalReference(counter)));
+    lw(scratch1, MemOperand(scratch2));
+    Addu(scratch1, scratch1, Operand(value));
+    sw(scratch1, MemOperand(scratch2));
+  }
 }
 
 
 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                       Register scratch1, Register scratch2) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    li(scratch2, Operand(ExternalReference(counter)));
+    lw(scratch1, MemOperand(scratch2));
+    Subu(scratch1, scratch1, Operand(value));
+    sw(scratch1, MemOperand(scratch2));
+  }
 }
 
 
@@ -1199,30 +2712,144 @@
 
 void MacroAssembler::Assert(Condition cc, const char* msg,
                             Register rs, Operand rt) {
-  UNIMPLEMENTED_MIPS();
+  if (FLAG_debug_code) {
+    Check(cc, msg, rs, rt);
+  }
+}
+
+
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+                                          Heap::RootListIndex index) {
+  if (FLAG_debug_code) {
+    LoadRoot(at, index);
+    Check(eq, "Register did not match expected root", reg, Operand(at));
+  }
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+  if (FLAG_debug_code) {
+    ASSERT(!elements.is(at));
+    Label ok;
+    Push(elements);
+    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    Branch(&ok, eq, elements, Operand(at));
+    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
+    Branch(&ok, eq, elements, Operand(at));
+    Abort("JSObject with fast elements map has slow elements");
+    bind(&ok);
+    Pop(elements);
+  }
 }
 
 
 void MacroAssembler::Check(Condition cc, const char* msg,
                            Register rs, Operand rt) {
-  UNIMPLEMENTED_MIPS();
+  Label L;
+  Branch(&L, cc, rs, rt);
+  Abort(msg);
+  // Will not return here.
+  bind(&L);
 }
 
 
 void MacroAssembler::Abort(const char* msg) {
-  UNIMPLEMENTED_MIPS();
+  Label abort_start;
+  bind(&abort_start);
+  // We want to pass the msg string like a smi to avoid GC
+  // problems, however msg is not guaranteed to be aligned
+  // properly. Instead, we pass an aligned pointer that is
+  // a proper v8 smi, but also pass the alignment difference
+  // from the real pointer as a smi.
+  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+#endif
+  // Disable stub call restrictions to always allow calls to abort.
+  AllowStubCallsScope allow_scope(this, true);
+
+  li(a0, Operand(p0));
+  Push(a0);
+  li(a0, Operand(Smi::FromInt(p1 - p0)));
+  Push(a0);
+  CallRuntime(Runtime::kAbort, 2);
+  // Will not return here.
+  if (is_trampoline_pool_blocked()) {
+    // If the calling code cares about the exact number of
+    // instructions generated, we insert padding here to keep the size
+    // of the Abort macro constant.
+    // Currently in debug mode with debug_code enabled the number of
+    // generated instructions is 14, so we use this as a maximum value.
+    static const int kExpectedAbortInstructions = 14;
+    int abort_instructions = InstructionsGeneratedSince(&abort_start);
+    ASSERT(abort_instructions <= kExpectedAbortInstructions);
+    while (abort_instructions++ < kExpectedAbortInstructions) {
+      nop();
+    }
+  }
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+  if (context_chain_length > 0) {
+    // Move up the chain of contexts to the context containing the slot.
+    lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
+    // Load the function context (which is the incoming, outer context).
+    lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+    for (int i = 1; i < context_chain_length; i++) {
+      lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+      lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+    }
+    // The context may be an intermediate context, not a function context.
+    lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {  // Slot is in the current function context.
+    // The context may be an intermediate context, not a function context.
+    lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  }
+}
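+// Sketch of the walk above for context_chain_length == 2:
+//   cp -> CLOSURE_INDEX -> kContextOffset -> CLOSURE_INDEX -> kContextOffset
+// followed by one FCONTEXT_INDEX load, which skips past any intermediate
+// (e.g. 'with') context to reach the enclosing function context.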
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+  // Load the global or builtins object from the current context.
+  lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  lw(function, FieldMemOperand(function,
+                               GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  lw(function, MemOperand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+                                                  Register map,
+                                                  Register scratch) {
+  // Load the initial map. The global functions all have initial maps.
+  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  if (FLAG_debug_code) {
+    Label ok, fail;
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
+    Branch(&ok);
+    bind(&fail);
+    Abort("Global functions must have initial map");
+    bind(&ok);
+  }
 }
 
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   addiu(sp, sp, -5 * kPointerSize);
-  li(t0, Operand(Smi::FromInt(type)));
-  li(t1, Operand(CodeObject()));
+  li(t8, Operand(Smi::FromInt(type)));
+  li(t9, Operand(CodeObject()));
   sw(ra, MemOperand(sp, 4 * kPointerSize));
   sw(fp, MemOperand(sp, 3 * kPointerSize));
   sw(cp, MemOperand(sp, 2 * kPointerSize));
-  sw(t0, MemOperand(sp, 1 * kPointerSize));
-  sw(t1, MemOperand(sp, 0 * kPointerSize));
+  sw(t8, MemOperand(sp, 1 * kPointerSize));
+  sw(t9, MemOperand(sp, 0 * kPointerSize));
   addiu(fp, sp, 3 * kPointerSize);
 }
 
@@ -1235,62 +2862,98 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
-                                    Register hold_argc,
+void MacroAssembler::EnterExitFrame(Register hold_argc,
                                     Register hold_argv,
-                                    Register hold_function) {
-  // Compute the argv pointer and keep it in a callee-saved register.
+                                    Register hold_function,
+                                    bool save_doubles) {
   // a0 is argc.
-  sll(t0, a0, kPointerSizeLog2);
-  add(hold_argv, sp, t0);
-  addi(hold_argv, hold_argv, -kPointerSize);
+  sll(t8, a0, kPointerSizeLog2);
+  addu(hold_argv, sp, t8);
+  addiu(hold_argv, hold_argv, -kPointerSize);
 
   // Compute callee's stack pointer before making changes and save it as
-  // t1 register so that it is restored as sp register on exit, thereby
+  // t9 register so that it is restored as sp register on exit, thereby
   // popping the args.
-  // t1 = sp + kPointerSize * #args
-  add(t1, sp, t0);
+  // t9 = sp + kPointerSize * #args
+  addu(t9, sp, t8);
+
+  // Compute the argv pointer and keep it in a callee-saved register.
+  // This only seems to be needed for crankshaft and may cause problems
+  // so it's disabled for now.
+  // Subu(s6, t9, Operand(kPointerSize));
 
   // Align the stack at this point.
   AlignStack(0);
 
   // Save registers.
   addiu(sp, sp, -12);
-  sw(t1, MemOperand(sp, 8));
+  sw(t9, MemOperand(sp, 8));
   sw(ra, MemOperand(sp, 4));
   sw(fp, MemOperand(sp, 0));
   mov(fp, sp);  // Setup new frame pointer.
 
-  // Push debug marker.
-  if (mode == ExitFrame::MODE_DEBUG) {
-    Push(zero_reg);
-  } else {
-    li(t0, Operand(CodeObject()));
-    Push(t0);
-  }
+  li(t8, Operand(CodeObject()));
+  Push(t8);  // Accessed from ExitFrame::code_slot.
 
   // Save the frame pointer and the context in top.
-  LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
-  sw(fp, MemOperand(t0));
-  LoadExternalReference(t0, ExternalReference(Top::k_context_address));
-  sw(cp, MemOperand(t0));
+  li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+  sw(fp, MemOperand(t8));
+  li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+  sw(cp, MemOperand(t8));
 
   // Setup argc and the builtin function in callee-saved registers.
   mov(hold_argc, a0);
   mov(hold_function, a1);
+
+  // Optionally save all double registers.
+  if (save_doubles) {
+#ifdef DEBUG
+    int frame_alignment = ActivationFrameAlignment();
+#endif
+    // The stack alignment code above made sp unaligned, so add space for one
+    // more double register and use aligned addresses.
+    ASSERT(kDoubleSize == frame_alignment);
+    // Mark the frame as containing doubles by pushing a non-valid return
+    // address, i.e. 0.
+    ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
+    push(zero_reg);  // Marker and alignment word.
+    int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
+    Subu(sp, sp, Operand(space));
+    // Remember: we only need to save every 2nd double FPU value.
+    for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+      FPURegister reg = FPURegister::from_code(i);
+      sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
+    }
+    // Note that f0 will be accessible at fp - 2*kPointerSize -
+    // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
+    // alignment word were pushed after the fp.
+  }
 }
 
 
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+  // Optionally restore all double registers.
+  if (save_doubles) {
+    // TODO(regis): Use vldrm instruction.
+    // Remember: we only need to restore every 2nd double FPU value.
+    for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+      FPURegister reg = FPURegister::from_code(i);
+      // Registers f30-f31 are just below the marker.
+      const int offset = ExitFrameConstants::kMarkerOffset;
+      ldc1(reg, MemOperand(fp,
+          (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
+    }
+  }
+
   // Clear top frame.
-  LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
-  sw(zero_reg, MemOperand(t0));
+  li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+  sw(zero_reg, MemOperand(t8));
 
   // Restore current context from top and clear it in debug mode.
-  LoadExternalReference(t0, ExternalReference(Top::k_context_address));
-  lw(cp, MemOperand(t0));
+  li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+  lw(cp, MemOperand(t8));
 #ifdef DEBUG
-  sw(a3, MemOperand(t0));
+  sw(a3, MemOperand(t8));
 #endif
 
   // Pop the arguments, restore registers, and return.
@@ -1303,24 +2966,362 @@
 }
 
 
+void MacroAssembler::InitializeNewString(Register string,
+                                         Register length,
+                                         Heap::RootListIndex map_index,
+                                         Register scratch1,
+                                         Register scratch2) {
+  sll(scratch1, length, kSmiTagSize);
+  LoadRoot(scratch2, map_index);
+  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
+  li(scratch1, Operand(String::kEmptyHashField));
+  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+}
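+// Resulting header layout (sketch; offsets per objects.h at this revision):
+//   [HeapObject::kMapOffset]    <- root map selected by map_index
+//   [String::kLengthOffset]     <- smi-tagged length
+//   [String::kHashFieldOffset]  <- kEmptyHashField (hash computed lazily)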
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if defined(V8_HOST_ARCH_MIPS)
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one Mips
+  // platform for another Mips platform with a different alignment.
+  return OS::ActivationFrameAlignment();
+#else  // defined(V8_HOST_ARCH_MIPS)
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so this is controlled from a
+  // flag.
+  return FLAG_sim_stack_alignment;
+#endif  // defined(V8_HOST_ARCH_MIPS)
+}
+
+
 void MacroAssembler::AlignStack(int offset) {
   // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
   //     and an offset of 1 aligns to 4 modulo 8 bytes.
+#if defined(V8_HOST_ARCH_MIPS)
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one MIPS
+  // platform for another MIPS platform with a different alignment.
   int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else  // defined(V8_HOST_ARCH_MIPS)
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so we will always align at
+  // this point.
+  int activation_frame_alignment = 2 * kPointerSize;
+#endif  // defined(V8_HOST_ARCH_MIPS)
   if (activation_frame_alignment != kPointerSize) {
     // This code needs to be made more general if this assert doesn't hold.
     ASSERT(activation_frame_alignment == 2 * kPointerSize);
     if (offset == 0) {
-      andi(t0, sp, activation_frame_alignment - 1);
-      Push(zero_reg, eq, t0, zero_reg);
+      andi(t8, sp, activation_frame_alignment - 1);
+      Push(zero_reg, eq, t8, zero_reg);
     } else {
-      andi(t0, sp, activation_frame_alignment - 1);
-      addiu(t0, t0, -4);
-      Push(zero_reg, eq, t0, zero_reg);
+      andi(t8, sp, activation_frame_alignment - 1);
+      addiu(t8, t8, -4);
+      Push(zero_reg, eq, t8, zero_reg);
     }
   }
 }
 
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+    Register reg,
+    Register scratch,
+    Label* not_power_of_two_or_zero) {
+  Subu(scratch, reg, Operand(1));
+  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
+         scratch, Operand(zero_reg));
+  and_(at, scratch, reg);  // In the delay slot.
+  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
+}
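+// The test above is the classic bit trick: for reg > 0,
+// (reg & (reg - 1)) == 0 iff reg is a power of two, e.g. 8 & 7 == 0 but
+// 6 & 5 == 4. The initial lt comparison on reg - 1 also routes reg == 0
+// (where scratch underflows to -1) to the failure label.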
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+                                      Register reg2,
+                                      Label* on_not_both_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  ASSERT_EQ(1, kSmiTagMask);
+  or_(at, reg1, reg2);
+  andi(at, at, kSmiTagMask);
+  Branch(on_not_both_smi, ne, at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+                                     Register reg2,
+                                     Label* on_either_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  ASSERT_EQ(1, kSmiTagMask);
+  // For neither operand to be a smi, both tag bits must be 1 (kSmiTag is 0).
+  and_(at, reg1, reg2);
+  andi(at, at, kSmiTagMask);
+  Branch(on_either_smi, eq, at, Operand(zero_reg));
+}
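+// Why AND suffices here (with kSmiTag == 0 a smi has tag bit 0): the AND
+// of both tag bits is 0 whenever at least one operand is a smi, so the eq
+// branch fires on "either smi"; JumpIfNotBothSmi above uses OR for the
+// dual "not both" test.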
+
+
+void MacroAssembler::AbortIfSmi(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  andi(at, object, kSmiTagMask);
+  Assert(ne, "Operand is a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotSmi(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  andi(at, object, kSmiTagMask);
+  Assert(eq, "Operand is a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotRootValue(Register src,
+                                         Heap::RootListIndex root_value_index,
+                                         const char* message) {
+  ASSERT(!src.is(at));
+  LoadRoot(at, root_value_index);
+  Assert(eq, message, src, Operand(at));
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Register heap_number_map,
+                                         Register scratch,
+                                         Label* on_not_heap_number) {
+  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
+}
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure) {
+  // Test that both first and second are sequential ASCII strings.
+  // Assume that they are non-smis.
+  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
+                                               scratch2,
+                                               scratch1,
+                                               scratch2,
+                                               failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+                                                         Register second,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         Label* failure) {
+  // Check that neither is a smi.
+  STATIC_ASSERT(kSmiTag == 0);
+  And(scratch1, first, Operand(second));
+  And(scratch1, scratch1, Operand(kSmiTagMask));
+  Branch(failure, eq, scratch1, Operand(zero_reg));
+  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+                                             second,
+                                             scratch1,
+                                             scratch2,
+                                             failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure) {
+  int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure it fits in 16 bits.
+  andi(scratch1, first, kFlatAsciiStringMask);
+  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
+  andi(scratch2, second, kFlatAsciiStringMask);
+  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+                                                            Register scratch,
+                                                            Label* failure) {
+  int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  And(scratch, type, Operand(kFlatAsciiStringMask));
+  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+}
+
+
+static const int kRegisterPassedArguments = 4;
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+  int frame_alignment = ActivationFrameAlignment();
+
+  // Reserve space for the Isolate address, which is always passed as the
+  // last parameter.
+  num_arguments += 1;
+
+  // Up to four simple arguments are passed in registers a0..a3.
+  // Those four arguments must have reserved argument slots on the stack for
+  // mips, even though those argument slots are not normally used.
+  // Remaining arguments are pushed on the stack, above (higher address than)
+  // the argument slots.
+  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+                                 0 : num_arguments - kRegisterPassedArguments) +
+                               (StandardFrameConstants::kCArgsSlotsSize /
+                               kPointerSize);
+  if (frame_alignment > kPointerSize) {
+    // Make stack end at alignment and make room for num_arguments - 4 words
+    // and the original value of sp.
+    mov(scratch, sp);
+    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+    ASSERT(IsPowerOf2(frame_alignment));
+    And(sp, sp, Operand(-frame_alignment));
+    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+  }
+}
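+// Worked example (assuming the four o32 arg slots, kCArgsSlotsSize == 16):
+// six C arguments become seven once the Isolate is appended, so
+// stack_passed_arguments = (7 - 4) + 16 / 4 = 7 words; with 8-byte frame
+// alignment one extra word is reserved to stash the original sp.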
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_arguments) {
+  CallCFunctionHelper(no_reg, function, at, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   Register scratch,
+                                   int num_arguments) {
+  CallCFunctionHelper(function,
+                      ExternalReference::the_hole_value_location(isolate()),
+                      scratch,
+                      num_arguments);
+}
+
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+                                         ExternalReference function_reference,
+                                         Register scratch,
+                                         int num_arguments) {
+  // Push Isolate address as the last argument.
+  if (num_arguments < kRegisterPassedArguments) {
+    Register arg_to_reg[] = {a0, a1, a2, a3};
+    Register r = arg_to_reg[num_arguments];
+    li(r, Operand(ExternalReference::isolate_address()));
+  } else {
+    int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
+                                 (StandardFrameConstants::kCArgsSlotsSize /
+                                  kPointerSize);
+    // Push Isolate address on the stack after the arguments.
+    li(scratch, Operand(ExternalReference::isolate_address()));
+    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  }
+  num_arguments += 1;
+
+  // Make sure that the stack is aligned before calling a C function unless
+  // running in the simulator. The simulator has its own alignment check which
+  // provides more information.
+  // The argument slots are presumed to have been set up by
+  // PrepareCallCFunction. The C function must be called via t9, per the
+  // MIPS ABI.
+
+#if defined(V8_HOST_ARCH_MIPS)
+  if (emit_debug_code()) {
+    int frame_alignment = OS::ActivationFrameAlignment();
+    int frame_alignment_mask = frame_alignment - 1;
+    if (frame_alignment > kPointerSize) {
+      ASSERT(IsPowerOf2(frame_alignment));
+      Label alignment_as_expected;
+      And(at, sp, Operand(frame_alignment_mask));
+      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+      // Don't use Check here, as it will call Runtime_Abort possibly
+      // re-entering here.
+      stop("Unexpected alignment in CallCFunction");
+      bind(&alignment_as_expected);
+    }
+  }
+#endif  // V8_HOST_ARCH_MIPS
+
+  // Just call directly. The function called cannot cause a GC, or
+  // allow preemption, so the return address in the link register
+  // stays correct.
+  if (function.is(no_reg)) {
+    li(t9, Operand(function_reference));
+    function = t9;
+  } else if (!function.is(t9)) {
+    mov(t9, function);
+    function = t9;
+  }
+
+  Call(function);
+
+  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+                                0 : num_arguments - kRegisterPassedArguments) +
+                               (StandardFrameConstants::kCArgsSlotsSize /
+                               kPointerSize);
+
+  if (OS::ActivationFrameAlignment() > kPointerSize) {
+    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+  }
+}
+
+
+#undef BRANCH_ARGS_CHECK
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+CodePatcher::CodePatcher(byte* address, int instructions)
+    : address_(address),
+      instructions_(instructions),
+      size_(instructions * Assembler::kInstrSize),
+      masm_(address, size_ + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr x) {
+  masm()->emit(x);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+  masm()->emit(reinterpret_cast<Instr>(addr));
+}
+
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 0f0365b..7ff9e17 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -36,69 +36,171 @@
 
 // Forward declaration.
 class JumpTarget;
+class PostCallGenerator;
 
-// Register at is used for instruction generation. So it is not safe to use it
-// unless we know exactly what we do.
+// Reserved Register Usage Summary.
+//
+// Registers t8, t9, and at are reserved for use by the MacroAssembler.
+//
+// The programmer should know that the MacroAssembler may clobber these three,
+// but won't touch other registers except in special cases.
+//
+// Per the MIPS ABI, register t9 must be used for indirect function call
+// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
+// trying to update gp register for position-independent-code. Whenever
+// MIPS generated code calls C code, it must be via t9 register.
 
 // Register aliases
 // cp is assumed to be a callee saved register.
+const Register roots = s6;  // Roots array pointer.
 const Register cp = s7;     // JavaScript context pointer
 const Register fp = s8_fp;  // Alias fp
+// Register used for condition evaluation.
+const Register condReg1 = s4;
+const Register condReg2 = s5;
 
 enum InvokeJSFlags {
   CALL_JS,
   JUMP_JS
 };
 
+
+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+  // No special flags.
+  NO_ALLOCATION_FLAGS = 0,
+  // Return the pointer to the allocated object, already tagged as a heap
+  // object.
+  TAG_OBJECT = 1 << 0,
+  // The content of the result register already contains the allocation top in
+  // new space.
+  RESULT_CONTAINS_TOP = 1 << 1,
+  // Specify that the requested size of the space to allocate is specified in
+  // words instead of bytes.
+  SIZE_IN_WORDS = 1 << 2
+};
+
+// Flags used for the ObjectToDoubleFPURegister function.
+enum ObjectToDoubleFlags {
+  // No special flags.
+  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
+  // Object is known to be a non smi.
+  OBJECT_NOT_SMI = 1 << 0,
+  // Don't load NaNs or infinities, branch to the non number case instead.
+  AVOID_NANS_AND_INFINITIES = 1 << 1
+};
+
+// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
+enum BranchDelaySlot {
+  USE_DELAY_SLOT,
+  PROTECT
+};
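+// Hedged usage note: PROTECT (the default) lets the assembler fill the
+// branch delay slot with a nop, while USE_DELAY_SLOT promises that the
+// caller emits the slot instruction itself, e.g.
+//   Branch(USE_DELAY_SLOT, &done, eq, t0, Operand(zero_reg));
+//   mov(v0, a0);  // In the delay slot; executes whether or not we branch.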
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
   MacroAssembler(void* buffer, int size);
 
-  // Jump, Call, and Ret pseudo instructions implementing inter-working.
-  void Jump(const Operand& target,
-            Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Call(const Operand& target,
-            Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Jump(Register target,
-            Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Jump(byte* target, RelocInfo::Mode rmode,
-            Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Jump(Handle<Code> code, RelocInfo::Mode rmode,
-            Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Call(Register target,
-            Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Call(byte* target, RelocInfo::Mode rmode,
-            Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Call(Handle<Code> code, RelocInfo::Mode rmode,
-            Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Ret(Condition cond = cc_always,
-           Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
-              const Operand& rt = Operand(zero_reg), Register scratch = at);
-  void Branch(Condition cond, Label* L, Register rs = zero_reg,
-              const Operand& rt = Operand(zero_reg), Register scratch = at);
-  // conditionnal branch and link
-  void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
-                     const Operand& rt = Operand(zero_reg),
-                     Register scratch = at);
-  void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
-                     const Operand& rt = Operand(zero_reg),
-                     Register scratch = at);
+// Arguments macros
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
+
+// ** Prototypes
+
+// * Prototypes for functions with no target (e.g. Ret()).
+#define DECLARE_NOTARGET_PROTOTYPE(Name) \
+  void Name(BranchDelaySlot bd = PROTECT); \
+  void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
+  inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \
+    Name(COND_ARGS, bd); \
+  }
+
+// * Prototypes for functions with a target.
+
+// Cases when relocation may be needed.
+#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
+  void Name(target_type target, \
+            RelocInfo::Mode rmode, \
+            BranchDelaySlot bd = PROTECT); \
+  inline void Name(BranchDelaySlot bd, \
+                   target_type target, \
+                   RelocInfo::Mode rmode) { \
+    Name(target, rmode, bd); \
+  } \
+  void Name(target_type target, \
+            RelocInfo::Mode rmode, \
+            COND_TYPED_ARGS, \
+            BranchDelaySlot bd = PROTECT); \
+  inline void Name(BranchDelaySlot bd, \
+                   target_type target, \
+                   RelocInfo::Mode rmode, \
+                   COND_TYPED_ARGS) { \
+    Name(target, rmode, COND_ARGS, bd); \
+  }
+
+// Cases when relocation is not needed.
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
+  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
+  inline void Name(BranchDelaySlot bd, target_type target) { \
+    Name(target, bd); \
+  } \
+  void Name(target_type target, \
+            COND_TYPED_ARGS, \
+            BranchDelaySlot bd = PROTECT); \
+  inline void Name(BranchDelaySlot bd, \
+                   target_type target, \
+                   COND_TYPED_ARGS) { \
+    Name(target, COND_ARGS, bd); \
+  }
+
+// ** Target prototypes.
+
+#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
+  DECLARE_NORELOC_PROTOTYPE(Name, Register) \
+  DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
+  DECLARE_RELOC_PROTOTYPE(Name, byte*) \
+  DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
+
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
+  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
+  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+
+
+DECLARE_JUMP_CALL_PROTOTYPES(Jump)
+DECLARE_JUMP_CALL_PROTOTYPES(Call)
+
+DECLARE_BRANCH_PROTOTYPES(Branch)
+DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+
+DECLARE_NOTARGET_PROTOTYPE(Ret)
+
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef DECLARE_NOTARGET_PROTOTYPE
+#undef DECLARE_NORELOC_PROTOTYPE
+#undef DECLARE_RELOC_PROTOTYPE
+#undef DECLARE_JUMP_CALL_PROTOTYPES
+#undef DECLARE_BRANCH_PROTOTYPES
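+// For reference, DECLARE_NORELOC_PROTOTYPE(Branch, Label*) above expands
+// (sketch) to:
+//   void Branch(Label* target, BranchDelaySlot bd = PROTECT);
+//   void Branch(Label* target, Condition cond, Register r1,
+//               const Operand& r2, BranchDelaySlot bd = PROTECT);
+// plus the inline delay-slot-first convenience overloads.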
 
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
-  void Drop(int count, Condition cond = cc_always);
+  void Drop(int count,
+            Condition cond = cc_always,
+            Register reg = no_reg,
+            const Operand& op = Operand(no_reg));
+
+  void DropAndRet(int drop = 0,
+                  Condition cond = cc_always,
+                  Register reg = no_reg,
+                  const Operand& op = Operand(no_reg));
+
+  // Swap two registers.  If the scratch register is omitted then a slightly
+  // less efficient form using xor instead of mov is emitted.
+  void Swap(Register reg1, Register reg2, Register scratch = no_reg);
 
   void Call(Label* target);
+  // May do nothing if the registers are identical.
+  void Move(Register dst, Register src);
+
 
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it used by v8, for example in
@@ -106,7 +208,7 @@
   // Currently the branch delay slot is filled by the MacroAssembler.
   // Use rather b(Label) for code generation.
   void jmp(Label* L) {
-    Branch(cc_always, L);
+    Branch(L);
   }
 
   // Load an object from the root table.
@@ -116,19 +218,164 @@
                 Heap::RootListIndex index,
                 Condition cond, Register src1, const Operand& src2);
 
-  // Load an external reference.
-  void LoadExternalReference(Register reg, ExternalReference ext) {
-    li(reg, Operand(ext));
+  // Store an object to the root table.
+  void StoreRoot(Register source,
+                 Heap::RootListIndex index);
+  void StoreRoot(Register source,
+                 Heap::RootListIndex index,
+                 Condition cond, Register src1, const Operand& src2);
+
+
+  // Check if object is in new space.
+  // scratch can be object itself, but it will be clobbered.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,  // eq for new space, ne otherwise.
+                  Label* branch);
+
+
+  // For the page containing |object| mark the region covering [address]
+  // dirty. The object address must be in the first 8K of an allocated page.
+  void RecordWriteHelper(Register object,
+                         Register address,
+                         Register scratch);
+
+  // For the page containing |object| mark the region covering
+  // [object+offset] dirty. The object address must be in the first 8K
+  // of an allocated page.  The 'scratch' registers are used in the
+  // implementation and all 3 registers are clobbered by the
+  // operation, as well as the 'at' register. RecordWrite updates the
+  // write barrier even when storing smis.
+  void RecordWrite(Register object,
+                   Operand offset,
+                   Register scratch0,
+                   Register scratch1);
+
+  // For the page containing |object| mark the region covering
+  // [address] dirty. The object address must be in the first 8K of an
+  // allocated page.  All 3 registers are clobbered by the operation,
+  // as well as the 'at' register. RecordWrite updates the write barrier
+  // even when storing smis.
+  void RecordWrite(Register object,
+                   Register address,
+                   Register scratch);
+
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, whereas both scratch registers are clobbered.
+  void CheckAccessGlobalProxy(Register holder_reg,
+                              Register scratch,
+                              Label* miss);
+
+  inline void MarkCode(NopMarkerTypes type) {
+    nop(type);
   }
 
-  // Sets the remembered set bit for [address+offset].
-  void RecordWrite(Register object, Register offset, Register scratch);
+  // Check if the given instruction is a 'type' marker.
+  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+  // nop(type)). These instructions are generated to mark special location in
+  // the code, like some special IC code.
+  static inline bool IsMarkedCode(Instr instr, int type) {
+    ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+    return IsNop(instr, type);
+  }
 
 
+  static inline int GetCodeMarker(Instr instr) {
+    uint32_t opcode = ((instr & kOpcodeMask));
+    uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
+    uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
+    uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+
+    // Return <n> if we have a sll zero_reg, zero_reg, n
+    // else return -1.
+    bool sllzz = (opcode == SLL &&
+                  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+                  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
+    int type =
+        (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
+    ASSERT((type == -1) ||
+           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+    return type;
+  }
+
+
+
+  // ---------------------------------------------------------------------------
+  // Allocation support
+
+  // Allocate an object in new space. The object_size is specified
+  // either in bytes or in words if the allocation flag SIZE_IN_WORDS
+  // is passed. If the new space is exhausted control continues at the
+  // gc_required label. The allocated object is returned in result. If
+  // the TAG_OBJECT flag is passed the result is tagged as a heap
+  // object. All registers are clobbered also when control
+  // continues at the gc_required label.
+  void AllocateInNewSpace(int object_size,
+                          Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required,
+                          AllocationFlags flags);
+  void AllocateInNewSpace(Register object_size,
+                          Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required,
+                          AllocationFlags flags);
+
+  // Undo allocation in new space. The object passed and objects allocated
+  // after it will no longer be allocated. The caller must make sure that no
+  // pointers are left to the object(s) no longer allocated, as they would be
+  // invalid when allocation is undone.
+  void UndoAllocationInNewSpace(Register object, Register scratch);
+
+
+  void AllocateTwoByteString(Register result,
+                             Register length,
+                             Register scratch1,
+                             Register scratch2,
+                             Register scratch3,
+                             Label* gc_required);
+  void AllocateAsciiString(Register result,
+                           Register length,
+                           Register scratch1,
+                           Register scratch2,
+                           Register scratch3,
+                           Label* gc_required);
+  void AllocateTwoByteConsString(Register result,
+                                 Register length,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Label* gc_required);
+  void AllocateAsciiConsString(Register result,
+                               Register length,
+                               Register scratch1,
+                               Register scratch2,
+                               Label* gc_required);
+
+  // Allocates a heap number or jumps to the gc_required label if the young
+  // space is full and a scavenge is needed. All registers are clobbered also
+  // when control continues at the gc_required label.
+  void AllocateHeapNumber(Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Register heap_number_map,
+                          Label* gc_required);
+  void AllocateHeapNumberWithValue(Register result,
+                                   FPURegister value,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* gc_required);
+
   // ---------------------------------------------------------------------------
   // Instruction macros
 
-#define DEFINE_INSTRUCTION(instr)                                       \
+#define DEFINE_INSTRUCTION(instr)                                              \
   void instr(Register rd, Register rs, const Operand& rt);                     \
   void instr(Register rd, Register rs, Register rt) {                          \
     instr(rd, rs, Operand(rt));                                                \
@@ -137,7 +384,7 @@
     instr(rs, rt, Operand(j));                                                 \
   }
 
-#define DEFINE_INSTRUCTION2(instr)                                      \
+#define DEFINE_INSTRUCTION2(instr)                                             \
   void instr(Register rs, const Operand& rt);                                  \
   void instr(Register rs, Register rt) {                                       \
     instr(rs, Operand(rt));                                                    \
@@ -146,8 +393,8 @@
     instr(rs, Operand(j));                                                     \
   }
 
-  DEFINE_INSTRUCTION(Add);
   DEFINE_INSTRUCTION(Addu);
+  DEFINE_INSTRUCTION(Subu);
   DEFINE_INSTRUCTION(Mul);
   DEFINE_INSTRUCTION2(Mult);
   DEFINE_INSTRUCTION2(Multu);
@@ -162,6 +409,9 @@
   DEFINE_INSTRUCTION(Slt);
   DEFINE_INSTRUCTION(Sltu);
 
+  // MIPS32 R2 instruction macro.
+  DEFINE_INSTRUCTION(Ror);
+
 #undef DEFINE_INSTRUCTION
 #undef DEFINE_INSTRUCTION2
 
@@ -169,8 +419,6 @@
   //------------Pseudo-instructions-------------
 
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
-  // Move the logical ones complement of source to dest.
-  void movn(Register rd, Register rt);
 
 
   // load int32 in the rd register
@@ -178,6 +426,9 @@
   inline void li(Register rd, int32_t j, bool gen2instr = false) {
     li(rd, Operand(j), gen2instr);
   }
+  inline void li(Register dst, Handle<Object> value, bool gen2instr = false) {
+    li(dst, Operand(value), gen2instr);
+  }
 
   // Exception-generating instructions and debugging support
   void stop(const char* msg);
@@ -188,19 +439,51 @@
   // saved in higher memory addresses
   void MultiPush(RegList regs);
   void MultiPushReversed(RegList regs);
+
   void Push(Register src) {
     Addu(sp, sp, Operand(-kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
+
+  // Push two registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Condition cond = al) {
+    ASSERT(cond == al);  // Do not support conditional versions yet.
+    Subu(sp, sp, Operand(2 * kPointerSize));
+    sw(src1, MemOperand(sp, 1 * kPointerSize));
+    sw(src2, MemOperand(sp, 0 * kPointerSize));
+  }
+
+  // Push three registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
+    ASSERT(cond == al);  // Do not support conditional versions yet.
+    Addu(sp, sp, Operand(3 * -kPointerSize));
+    sw(src1, MemOperand(sp, 2 * kPointerSize));
+    sw(src2, MemOperand(sp, 1 * kPointerSize));
+    sw(src3, MemOperand(sp, 0 * kPointerSize));
+  }
+
+  // Push four registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2,
+            Register src3, Register src4, Condition cond = al) {
+    ASSERT(cond == al);  // Do not support conditional versions yet.
+    Addu(sp, sp, Operand(4 * -kPointerSize));
+    sw(src1, MemOperand(sp, 3 * kPointerSize));
+    sw(src2, MemOperand(sp, 2 * kPointerSize));
+    sw(src3, MemOperand(sp, 1 * kPointerSize));
+    sw(src4, MemOperand(sp, 0 * kPointerSize));
+  }
+
   inline void push(Register src) { Push(src); }
+  inline void pop(Register src) { Pop(src); }
 
   void Push(Register src, Condition cond, Register tst1, Register tst2) {
     // Since we don't have conditional execution we use a Branch.
-    Branch(cond, 3, tst1, Operand(tst2));
+    Branch(3, cond, tst1, Operand(tst2));
     Addu(sp, sp, Operand(-kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
 
+
   // Pops multiple values from the stack and load them in the
   // registers specified in regs. Pop order is the opposite as in MultiPush.
   void MultiPop(RegList regs);
@@ -209,44 +492,108 @@
     lw(dst, MemOperand(sp, 0));
     Addu(sp, sp, Operand(kPointerSize));
   }
-  void Pop() {
-    Add(sp, sp, Operand(kPointerSize));
+  void Pop(uint32_t count = 1) {
+    Addu(sp, sp, Operand(count * kPointerSize));
   }
 
+  // ---------------------------------------------------------------------------
+  // These functions are only used by crankshaft, so they are currently
+  // unimplemented.
+
+  // Push and pop the registers that can hold pointers, as defined by the
+  // RegList constant kSafepointSavedRegisters.
+  void PushSafepointRegisters() {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  void PopSafepointRegisters() {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  void PushSafepointRegistersAndDoubles() {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  void PopSafepointRegistersAndDoubles() {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  static int SafepointRegisterStackIndex(int reg_code) {
+    UNIMPLEMENTED_MIPS();
+    return 0;
+  }
 
   // ---------------------------------------------------------------------------
+
+  // MIPS32 R2 instruction macro.
+  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+  // Convert unsigned word to double.
+  void Cvt_d_uw(FPURegister fd, FPURegister fs);
+  void Cvt_d_uw(FPURegister fd, Register rs);
+
+  // Convert double to unsigned word.
+  void Trunc_uw_d(FPURegister fd, FPURegister fs);
+  void Trunc_uw_d(FPURegister fd, Register rs);
+
+  // Convert the HeapNumber pointed to by source to a 32-bit signed integer
+  // in dest. If the HeapNumber does not fit into a 32-bit signed integer,
+  // branch to the not_int32 label. If FPU is available, double_scratch is
+  // used but not scratch2.
+  void ConvertToInt32(Register source,
+                      Register dest,
+                      Register scratch,
+                      Register scratch2,
+                      FPURegister double_scratch,
+                      Label *not_int32);
+
+  // -------------------------------------------------------------------------
   // Activation frames
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
   void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
 
-  // Enter specific kind of exit frame; either EXIT or
-  // EXIT_DEBUG. Expects the number of arguments in register a0 and
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
+  // Enter exit frame.
+  // Expects the number of arguments in register a0 and
   // the builtin function to call in register a1.
   // On output hold_argc, hold_function, and hold_argv are set up.
-  void EnterExitFrame(ExitFrame::Mode mode,
-                      Register hold_argc,
+  void EnterExitFrame(Register hold_argc,
                       Register hold_argv,
-                      Register hold_function);
+                      Register hold_function,
+                      bool save_doubles);
 
   // Leave the current exit frame. Expects the return value in v0.
-  void LeaveExitFrame(ExitFrame::Mode mode);
+  void LeaveExitFrame(bool save_doubles);
 
   // Align the stack by optionally pushing a Smi zero.
-  void AlignStack(int offset);
+  void AlignStack(int offset);  // TODO(mips): Remove this function.
 
-  void SetupAlignedCall(Register scratch, int arg_count = 0);
-  void ReturnFromAlignedCall();
+  // Get the actual activation frame alignment for target environment.
+  static int ActivationFrameAlignment();
 
+  void LoadContext(Register dst, int context_chain_length);
 
-  // ---------------------------------------------------------------------------
+  void LoadGlobalFunction(int index, Register function);
+
+  // Load the initial map from the global function. The registers
+  // function and map can be the same, function is then overwritten.
+  void LoadGlobalFunctionInitialMap(Register function,
+                                    Register map,
+                                    Register scratch);
+
+  // -------------------------------------------------------------------------
   // JavaScript invokes
 
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  PostCallGenerator* post_call_generator = NULL);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
@@ -258,84 +605,135 @@
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
+
+  void InvokeFunction(JSFunction* function,
+                      const ParameterCount& actual,
                       InvokeFlag flag);
 
 
+  void IsObjectJSObjectType(Register heap_object,
+                            Register map,
+                            Register scratch,
+                            Label* fail);
+
+  void IsInstanceJSObjectType(Register map,
+                              Register scratch,
+                              Label* fail);
+
+  void IsObjectJSStringType(Register object,
+                            Register scratch,
+                            Label* fail);
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  // ---------------------------------------------------------------------------
+  // -------------------------------------------------------------------------
   // Debugger Support
 
-  void SaveRegistersToMemory(RegList regs);
-  void RestoreRegistersFromMemory(RegList regs);
-  void CopyRegistersFromMemoryToStack(Register base, RegList regs);
-  void CopyRegistersFromStackToMemory(Register base,
-                                      Register scratch,
-                                      RegList regs);
   void DebugBreak();
 #endif
 
 
-  // ---------------------------------------------------------------------------
+  // -------------------------------------------------------------------------
   // Exception handling
 
   // Push a new try handler and link into try handler chain.
   // The return address must be passed in register ra.
+  // Clobber t0, t1, t2.
   void PushTryHandler(CodeLocation try_location, HandlerType type);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   // Must preserve the result register.
   void PopTryHandler();
 
+  // Copies a fixed number of fields of heap objects from src to dst.
+  void CopyFields(Register dst, Register src, RegList temps, int field_count);
 
-  // ---------------------------------------------------------------------------
+  // -------------------------------------------------------------------------
   // Support functions.
 
+  // Try to get the prototype of a function and put the value in the
+  // result register. Checks that the function really is a function and
+  // jumps to the miss label if the fast checks fail. The function
+  // register will be untouched; the other registers may be clobbered.
+  void TryGetFunctionPrototype(Register function,
+                               Register result,
+                               Register scratch,
+                               Label* miss);
+
   void GetObjectType(Register function,
                      Register map,
                      Register type_reg);
 
-  inline void BranchOnSmi(Register value, Label* smi_label,
-                          Register scratch = at) {
-    ASSERT_EQ(0, kSmiTag);
-    andi(scratch, value, kSmiTagMask);
-    Branch(eq, smi_label, scratch, Operand(zero_reg));
-  }
+  // Check if the map of an object is equal to a specified map (either
+  // given directly or as an index into the root list) and branch to the
+  // fail label if not. Skip the smi check if not required (object is known
+  // to be a heap object).
+  void CheckMap(Register obj,
+                Register scratch,
+                Handle<Map> map,
+                Label* fail,
+                bool is_heap_object);
 
-
-  inline void BranchOnNotSmi(Register value, Label* not_smi_label,
-                             Register scratch = at) {
-    ASSERT_EQ(0, kSmiTag);
-    andi(scratch, value, kSmiTagMask);
-    Branch(ne, not_smi_label, scratch, Operand(zero_reg));
-  }
-
-  void CallBuiltin(ExternalReference builtin_entry);
-  void CallBuiltin(Register target);
-  void JumpToBuiltin(ExternalReference builtin_entry);
-  void JumpToBuiltin(Register target);
+  void CheckMap(Register obj,
+                Register scratch,
+                Heap::RootListIndex index,
+                Label* fail,
+                bool is_heap_object);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
   void IllegalOperation(int num_arguments);
 
+  // Picks out an array index from the hash field.
+  // Register use:
+  //   hash - holds the index's hash. Clobbered.
+  //   index - holds the overwritten index on exit.
+  void IndexFromHash(Register hash, Register index);
 
-  // ---------------------------------------------------------------------------
+  // Load the value of a number object into an FPU double register. If the
+  // object is not a number, a jump to the label not_number is performed
+  // and the FPU double register is unchanged.
+  void ObjectToDoubleFPURegister(
+      Register object,
+      FPURegister value,
+      Register scratch1,
+      Register scratch2,
+      Register heap_number_map,
+      Label* not_number,
+      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
+
+  // Load the value of a smi object into an FPU double register. The register
+  // scratch1 can be the same register as smi, in which case smi will hold
+  // the untagged value afterwards.
+  void SmiToDoubleFPURegister(Register smi,
+                              FPURegister value,
+                              Register scratch1);
+
+  // -------------------------------------------------------------------------
   // Runtime calls
 
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = cc_always,
                 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+
+  // Tail call a code stub (jump).
+  void TailCallStub(CodeStub* stub);
+
   void CallJSExitStub(CodeStub* stub);
 
-  // Return from a code stub after popping its arguments.
-  void StubReturn(int argc);
-
   // Call a runtime routine.
-  void CallRuntime(Runtime::Function* f, int num_arguments);
+  void CallRuntime(const Runtime::Function* f, int num_arguments);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId fid, int num_arguments);
 
+  // Convenience function: call an external reference.
+  void CallExternalReference(const ExternalReference& ext,
+                             int num_arguments);
+
   // Tail call of a runtime routine (jump).
   // Like JumpToExternalReference, but also takes care of passing the number
   // of parameters.
@@ -348,34 +746,54 @@
                        int num_arguments,
                        int result_size);
 
+  // Before calling a C function from generated code, align arguments on the
+  // stack and add space for the four MIPS argument slots.
+  // After aligning the frame, non-register arguments must be stored on the
+  // stack after the argument slots, using the helper
+  // CFunctionArgumentOperand().
+  // The argument count assumes all arguments are word sized.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  void PrepareCallCFunction(int num_arguments, Register scratch);
+
+  // Arguments 1-4 are placed in registers a0 through a3, respectively.
+  // Arguments 5..n are stored on the stack as follows:
+  //  sw(t0, CFunctionArgumentOperand(5));
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by PrepareCallCFunction. The called function is not allowed to trigger a
+  // garbage collection, since that might move the code and invalidate the
+  // return address (unless this is somehow accounted for by the called
+  // function).
+  void CallCFunction(ExternalReference function, int num_arguments);
+  void CallCFunction(Register function, Register scratch, int num_arguments);
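
As an aside, a minimal sketch of how these helpers are meant to compose for a
two-argument C call; this is illustrative only and not part of this change
(the external reference ext_ref and the argument values are hypothetical):

    // Hypothetical call sequence from generated code.
    masm->PrepareCallCFunction(2, t9);  // Align sp, reserve argument slots.
    masm->li(a0, Operand(1));           // Arguments 1-4 are passed in a0..a3.
    masm->li(a1, Operand(2));
    masm->CallCFunction(ext_ref, 2);    // Call, then restore the stack.
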
+
   // Jump to the builtin routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
-  void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+  void InvokeBuiltin(Builtins::JavaScript id,
+                     InvokeJSFlags flags,
+                     PostCallGenerator* post_call_generator = NULL);
 
   // Store the code object for the given builtin in the target register and
-  // setup the function in r1.
+  // set up the function in a1.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
 
+  // Store the function for the given builtin in the target register.
+  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
   struct Unresolved {
     int pc;
     uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
     const char* name;
   };
-  List<Unresolved>* unresolved() { return &unresolved_; }
 
   Handle<Object> CodeObject() { return code_object_; }
 
-
-  // ---------------------------------------------------------------------------
-  // Stack limit support
-
-  void StackLimitCheck(Label* on_stack_limit_hit);
-
-
-  // ---------------------------------------------------------------------------
+  // -------------------------------------------------------------------------
   // StatsCounter support
 
   void SetCounter(StatsCounter* counter, int value,
@@ -386,12 +804,14 @@
                         Register scratch1, Register scratch2);
 
 
-  // ---------------------------------------------------------------------------
+  // -------------------------------------------------------------------------
   // Debugging
 
   // Calls Abort(msg) if the condition cc is not satisfied.
   // Use --debug_code to enable.
   void Assert(Condition cc, const char* msg, Register rs, Operand rt);
+  void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+  void AssertFastElements(Register elements);
 
   // Like Assert(), but always enabled.
   void Check(Condition cc, const char* msg, Register rs, Operand rt);
@@ -405,17 +825,132 @@
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
 
- private:
-  List<Unresolved> unresolved_;
-  bool generating_stub_;
-  bool allow_stub_calls_;
-  // This handle will be patched with the code object on installation.
-  Handle<Object> code_object_;
+  // ---------------------------------------------------------------------------
+  // Number utilities
 
+  // Check whether the value of reg is a power of two and not zero. If not
+  // control continues at the label not_power_of_two. If reg is a power of two
+  // the register scratch contains the value of (reg - 1) when control falls
+  // through.
+  void JumpIfNotPowerOfTwoOrZero(Register reg,
+                                 Register scratch,
+                                 Label* not_power_of_two_or_zero);
+
+  // -------------------------------------------------------------------------
+  // Smi utilities
+
+  // Try to convert int32 to smi. If the value is too large, preserve
+  // the original value and jump to not_a_smi. Destroys scratch and
+  // sets flags.
+  // This is only used by Crankshaft at the moment, so it is unimplemented
+  // on MIPS.
+  void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  void SmiTag(Register reg) {
+    Addu(reg, reg, reg);
+  }
+
+  void SmiTag(Register dst, Register src) {
+    Addu(dst, src, src);
+  }
+
+  void SmiUntag(Register reg) {
+    sra(reg, reg, kSmiTagSize);
+  }
+
+  void SmiUntag(Register dst, Register src) {
+    sra(dst, src, kSmiTagSize);
+  }
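
A plain-C restatement of the tagging arithmetic above, for illustration only
(kSmiTag == 0 and kSmiTagSize == 1 on mips32, so Addu(reg, reg, reg) doubles
the value, which equals a left shift by one):

    int32_t tagged   = value + value;                // SmiTag: value << 1.
    int32_t untagged = tagged >> 1;                  // SmiUntag: sra by 1.
    bool    is_smi   = (tagged & kSmiTagMask) == 0;  // Low bit clear => smi.
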
+
+  // Jump if the register contains a smi.
+  inline void JumpIfSmi(Register value, Label* smi_label,
+                        Register scratch = at) {
+    ASSERT_EQ(0, kSmiTag);
+    andi(scratch, value, kSmiTagMask);
+    Branch(smi_label, eq, scratch, Operand(zero_reg));
+  }
+
+  // Jump if the register contains a non-smi.
+  inline void JumpIfNotSmi(Register value, Label* not_smi_label,
+                           Register scratch = at) {
+    ASSERT_EQ(0, kSmiTag);
+    andi(scratch, value, kSmiTagMask);
+    Branch(not_smi_label, ne, scratch, Operand(zero_reg));
+  }
+
+  // Jump if either of the registers contains a non-smi.
+  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+  // Jump if either of the registers contains a smi.
+  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+  // Abort execution if argument is a smi. Used in debug code.
+  void AbortIfSmi(Register object);
+  void AbortIfNotSmi(Register object);
+
+  // Abort execution if argument is not the root value with the given index.
+  void AbortIfNotRootValue(Register src,
+                           Heap::RootListIndex root_value_index,
+                           const char* message);
+
+  // ---------------------------------------------------------------------------
+  // HeapNumber utilities
+
+  void JumpIfNotHeapNumber(Register object,
+                           Register heap_number_map,
+                           Register scratch,
+                           Label* on_not_heap_number);
+
+  // -------------------------------------------------------------------------
+  // String utilities
+
+  // Checks if both instance types are sequential ASCII strings and jumps to
+  // label if either is not.
+  void JumpIfBothInstanceTypesAreNotSequentialAscii(
+      Register first_object_instance_type,
+      Register second_object_instance_type,
+      Register scratch1,
+      Register scratch2,
+      Label* failure);
+
+  // Check if instance type is sequential ASCII string and jump to label if
+  // it is not.
+  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+                                              Register scratch,
+                                              Label* failure);
+
+  // Test that both first and second are sequential ASCII strings.
+  // Assume that they are non-smis.
+  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
+                                                  Register second,
+                                                  Register scratch1,
+                                                  Register scratch2,
+                                                  Label* failure);
+
+  // Test that both first and second are sequential ASCII strings.
+  // Check that they are non-smis.
+  void JumpIfNotBothSequentialAsciiStrings(Register first,
+                                           Register second,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Label* failure);
+
+ private:
+  void CallCFunctionHelper(Register function,
+                           ExternalReference function_reference,
+                           Register scratch,
+                           int num_arguments);
+
+  void Jump(intptr_t target, RelocInfo::Mode rmode,
+            BranchDelaySlot bd = PROTECT);
   void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
+            BranchDelaySlot bd = PROTECT);
+  void Call(intptr_t target, RelocInfo::Mode rmode,
+            BranchDelaySlot bd = PROTECT);
   void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
+            BranchDelaySlot bd = PROTECT);
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -423,22 +958,84 @@
                       Handle<Code> code_constant,
                       Register code_reg,
                       Label* done,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   // Get the code for the given builtin. Returns if able to resolve
   // the function in the 'resolved' flag.
   Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
 
   // Activation support.
-  // EnterFrame clobbers t0 and t1.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
+
+  void InitializeNewString(Register string,
+                           Register length,
+                           Heap::RootListIndex map_index,
+                           Register scratch1,
+                           Register scratch2);
+
+
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
+};
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+  CodePatcher(byte* address, int instructions);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+  // Emit an instruction directly.
+  void Emit(Instr x);
+
+  // Emit an address directly.
+  void Emit(Address addr);
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int instructions_;  // Number of instructions of the expected patch size.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
+#endif  // ENABLE_DEBUGGER_SUPPORT
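
A hedged usage sketch for CodePatcher; the address and the patched
instructions are arbitrary examples, not code from this change:

    CodePatcher patcher(address, 2);  // Expect exactly two instructions.
    patcher.masm()->nop();            // Each emit is counted against the
    patcher.masm()->nop();            // size asserted in the destructor.
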
+
+
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example this can be used to
+// generate safepoint data after calls for crankshaft.
+class PostCallGenerator {
+ public:
+  PostCallGenerator() { }
+  virtual ~PostCallGenerator() { }
+  virtual void Generate() = 0;
 };
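
To illustrate how this hook is intended to be used, a hypothetical subclass
(the name and body are examples, not part of this change):

    class RecordSafepointGenerator : public PostCallGenerator {
     public:
      // Invoked by the macro assembler immediately after the decorated call.
      virtual void Generate() {
        // e.g. record safepoint data for the preceding call site.
      }
    };
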
 
 
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
+static MemOperand ContextOperand(Register context, int index) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+static inline MemOperand GlobalObjectOperand() {
+  return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
 // Generate a MemOperand for loading a field from an object.
 static inline MemOperand FieldMemOperand(Register object, int offset) {
   return MemOperand(object, offset - kHeapObjectTag);
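
A brief usage sketch of these helpers, assumed rather than taken from this
change; FieldMemOperand folds the -1 heap-object tag into the offset, so
field loads need no separate untag step:

    masm->lw(a0, GlobalObjectOperand());                        // Global object.
    masm->lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));  // Its map.
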
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
new file mode 100644
index 0000000..d1dbc43
--- /dev/null
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -0,0 +1,478 @@
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
+#include "unicode.h"
+#include "log.h"
+#include "code-stubs.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "mips/regexp-macro-assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - t1 : Pointer to current code object (Code*) including heap object tag.
+ * - t2 : Current position in input, as negative offset from end of string.
+ *        Note that this is the byte offset, not the character offset!
+ * - t3 : Currently loaded character. Must be loaded using
+ *        LoadCurrentCharacter before using any of the dispatch methods.
+ * - t4 : points to tip of backtrack stack
+ * - t5 : Unused.
+ * - t6 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ *         RegExp registers.
+ * - sp : points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ *
+ * Each call to a public method should retain this convention.
+ * The stack will have the following structure:
+ *       - direct_call        (if 1, direct call from JavaScript code, if 0 call
+ *                             through the runtime system)
+ *       - stack_area_base    (High end of the memory area to use as
+ *                             backtracking stack)
+ *       - int* capture_array (int[num_saved_registers_], for output).
+ *       - stack frame header (16 bytes in size)
+ *       --- sp when called ---
+ *       - link address
+ *       - backup of registers s0..s7
+ *       - end of input       (Address of end of string)
+ *       - start of input     (Address of first character in string)
+ *       - start index        (character index of start)
+ *       --- frame pointer ----
+ *       - void* input_string (location of a handle containing the string)
+ *       - Offset of location before start of input (effectively character
+ *         position -1). Used to initialize capture registers to a non-position.
+ *       - At start (if 1, we are starting at the start of the
+ *         string, otherwise 0)
+ *       - register 0         (Only positions must be stored in the first
+ *       - register 1          num_saved_registers_ registers)
+ *       - ...
+ *       - register num_registers-1
+ *       --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code, by calling the code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ *              int start_index,
+ *              Address start,
+ *              Address end,
+ *              int* capture_output_array,
+ *              bool at_start,
+ *              byte* stack_area_base,
+ *              bool direct_call)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc).
+ */
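
For illustration, the documented entry signature restated as a C++ typedef;
the typedef name and the code_entry variable are hypothetical, and the real
call site is NativeRegExpMacroAssembler::Execute():

    typedef int (*RegExpEntry)(String* input_string, int start_index,
                               Address start, Address end,
                               int* capture_output_array, bool at_start,
                               byte* stack_area_base, bool direct_call);
    // int result = reinterpret_cast<RegExpEntry>(code_entry)(/* args */);
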
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
+    Mode mode,
+    int registers_to_save)
+    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+      mode_(mode),
+      num_registers_(registers_to_save),
+      num_saved_registers_(registers_to_save),
+      entry_label_(),
+      start_label_(),
+      success_label_(),
+      backtrack_label_(),
+      exit_label_() {
+  ASSERT_EQ(0, registers_to_save % 2);
+  __ jmp(&entry_label_);   // We'll write the entry code later.
+  __ bind(&start_label_);  // And then continue from here.
+}
+
+
+RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
+  delete masm_;
+  // Unuse labels in case we throw away the assembler without calling GetCode.
+  entry_label_.Unuse();
+  start_label_.Unuse();
+  success_label_.Unuse();
+  backtrack_label_.Unuse();
+  exit_label_.Unuse();
+  check_preempt_label_.Unuse();
+  stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerMIPS::stack_limit_slack() {
+  return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Backtrack() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Bind(Label* label) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
+                                               int cp_offset,
+                                               Label* on_failure,
+                                               bool check_end_of_string) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
+    int start_reg,
+    Label* on_no_match) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(
+    int start_reg,
+    Label* on_no_match) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotRegistersEqual(int reg1,
+                                                      int reg2,
+                                                      Label* on_not_equal) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
+                                                 Label* on_not_equal) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
+                                                      uint32_t mask,
+                                                      Label* on_equal) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
+                                                         uint32_t mask,
+                                                         Label* on_not_equal) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
+    uc16 c,
+    uc16 minus,
+    uc16 mask,
+    Label* on_not_equal) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
+                                                          Label* on_no_match) {
+  UNIMPLEMENTED_MIPS();
+  return false;
+}
+
+
+void RegExpMacroAssemblerMIPS::Fail() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Handle<Object> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
+  UNIMPLEMENTED_MIPS();
+  return Handle<Object>::null();
+}
+
+
+void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
+                                            int comparand,
+                                            Label* if_ge) {
+  __ lw(a0, register_location(reg));
+  BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
+                                            int comparand,
+                                            Label* if_lt) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
+                                               Label* if_eq) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+    RegExpMacroAssemblerMIPS::Implementation() {
+  return kMIPSImplementation;
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
+                                                    Label* on_end_of_input,
+                                                    bool check_bounds,
+                                                    int characters) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
+  Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
+                                            StackCheckFlag check_stack_limit) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Succeed() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
+                                                              int cp_offset) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+  return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
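
For illustration, once implemented, CheckStackGuardState would read its
stacked arguments through this helper with the frame-offset constants from
regexp-macro-assembler-mips.h; a sketch, not this patch's code:

    int start_index = frame_entry<int>(re_frame, kStartIndex);
    String* input   = frame_entry<String*>(re_frame, kInputString);
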
+
+
+int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
+                                                   Code* re_code,
+                                                   Address re_frame) {
+  UNIMPLEMENTED_MIPS();
+  return 0;
+}
+
+
+MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
+  UNIMPLEMENTED_MIPS();
+  return MemOperand(zero_reg, 0);
+}
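
register_location() is still a stub here; on the ARM port the same frame
constants produce the following mapping (regexp register i lives
kRegisterZero - i * kPointerSize below fp), shown only as a sketch of the
intended layout:

    return MemOperand(frame_pointer(),
                      kRegisterZero - register_index * kPointerSize);
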
+
+
+void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
+                                             Label* on_outside_input) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
+                                                 Condition condition,
+                                                 Register rs,
+                                                 const Operand& rt) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
+                                        const Operand& rt) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeReturn() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Push(Register source) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Pop(Register target) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckPreemption() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckStackLimit() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
+    ExternalReference function,
+    int num_arguments) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
+                                                             int characters) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+#endif  // V8_INTERPRETED_REGEXP
+
+}}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
new file mode 100644
index 0000000..2f4319f
--- /dev/null
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -0,0 +1,250 @@
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public RegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerMIPS();
+  virtual ~RegExpMacroAssemblerMIPS();
+};
+#else  // V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save);
+  virtual ~RegExpMacroAssemblerMIPS();
+  virtual int stack_limit_slack();
+  virtual void AdvanceCurrentPosition(int by);
+  virtual void AdvanceRegister(int reg, int by);
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(uint32_t c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  virtual void CheckCharacters(Vector<const uc16> str,
+                               int cp_offset,
+                               Label* on_failure,
+                               bool check_end_of_string);
+  // A "greedy loop" is a loop that is both greedy and with a simple
+  // body. It has a particularly simple implementation.
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 mask,
+                                              Label* on_not_equal);
+  // Checks whether the given offset from the current position is before
+  // the end of the string.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+  virtual bool CheckSpecialCharacterClass(uc16 type,
+                                          Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<Object> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
+  virtual void SetRegister(int register_index, int to);
+  virtual void Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address,
+                                  Code* re_code,
+                                  Address re_frame);
+ private:
+  // Offsets from frame_pointer() of function parameters and stored registers.
+  static const int kFramePointer = 0;
+
+  // Above the frame pointer - Stored registers and stack passed parameters.
+  // Registers s0 to s7, fp, and ra.
+  static const int kStoredRegisters = kFramePointer;
+  // Return address (stored from link register, read into pc on return).
+  static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+  // Stack frame header.
+  static const int kStackFrameHeader = kReturnAddress + kPointerSize;
+  // Stack parameters placed by caller.
+  static const int kRegisterOutput = kStackFrameHeader + 16;
+  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+  static const int kDirectCall = kStackHighEnd + kPointerSize;
+  static const int kIsolate = kDirectCall + kPointerSize;
+
+  // Below the frame pointer.
+  // Register parameters stored by setup code.
+  static const int kInputEnd = kFramePointer - kPointerSize;
+  static const int kInputStart = kInputEnd - kPointerSize;
+  static const int kStartIndex = kInputStart - kPointerSize;
+  static const int kInputString = kStartIndex - kPointerSize;
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kInputStartMinusOne = kInputString - kPointerSize;
+  static const int kAtStart = kInputStartMinusOne - kPointerSize;
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kAtStart - kPointerSize;
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState(Register scratch);
+
+  // The fp-relative location of a regexp register.
+  MemOperand register_location(int register_index);
+
+  // Register holding the current input position as negative offset from
+  // the end of the string.
+  inline Register current_input_offset() { return t2; }
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return t3; }
+
+  // Register holding address of the end of the input string.
+  inline Register end_of_input_address() { return t6; }
+
+  // Register holding the frame address. Local variables, parameters and
+  // regexp registers are addressed relative to this.
+  inline Register frame_pointer() { return fp; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return t4; }
+
+  // Register holding pointer to the current code object.
+  inline Register code_pointer() { return t1; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument).
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Label* to,
+                         Condition condition,
+                         Register rs,
+                         const Operand& rt);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack).
+  inline void SafeCall(Label* to,
+                       Condition cond,
+                       Register rs,
+                       const Operand& rt);
+  inline void SafeReturn();
+  inline void SafeCallTarget(Label* name);
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer by a word size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pops a value from the backtrack stack. Reads the word at the stack pointer
+  // and increments it by a word size.
+  inline void Pop(Register target);
+
+  // Calls a C function and cleans up the frame alignment done by
+  // FrameAlign. The called function *is* allowed to trigger a garbage
+  // collection, but may not take more than four arguments (no arguments
+  // passed on the stack), and the first argument will be a pointer to the
+  // return address.
+  inline void CallCFunctionUsingStub(ExternalReference function,
+                                     int num_arguments);
+
+
+  MacroAssembler* masm_;
+
+  // Which mode to generate code for (ASCII or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1)
+  int num_saved_registers_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+};
+
+#endif  // V8_INTERPRETED_REGEXP
+
+
+}}  // namespace v8::internal
+
+#endif  // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
diff --git a/src/mips/register-allocator-mips-inl.h b/src/mips/register-allocator-mips-inl.h
index a876bee..bbfb31d 100644
--- a/src/mips/register-allocator-mips-inl.h
+++ b/src/mips/register-allocator-mips-inl.h
@@ -125,9 +125,6 @@
 
 void RegisterAllocator::Initialize() {
   Reset();
-  // The non-reserved a1 and ra registers are live on JS function entry.
-  Use(a1);  // JS function.
-  Use(ra);  // Return address.
 }
 
 
diff --git a/src/mips/register-allocator-mips.h b/src/mips/register-allocator-mips.h
index e056fb8..c448923 100644
--- a/src/mips/register-allocator-mips.h
+++ b/src/mips/register-allocator-mips.h
@@ -35,8 +35,9 @@
 
 class RegisterAllocatorConstants : public AllStatic {
  public:
-  static const int kNumRegisters = assembler::mips::kNumRegisters;
-  static const int kInvalidRegister = assembler::mips::kInvalidRegister;
+  // No registers are currently managed by the register allocator on MIPS.
+  static const int kNumRegisters = 0;
+  static const int kInvalidRegister = -1;
 };
 
 
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 59a5373..50ad7a1 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -26,6 +26,8 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdlib.h>
+#include <math.h>
+#include <limits.h>
 #include <cstdarg>
 #include "v8.h"
 
@@ -37,23 +39,25 @@
 #include "mips/constants-mips.h"
 #include "mips/simulator-mips.h"
 
-namespace v8i = v8::internal;
-
-#if !defined(__mips) || defined(USE_SIMULATOR)
 
 // Only build the simulator if not compiling for real MIPS hardware.
-namespace assembler {
-namespace mips {
+#if defined(USE_SIMULATOR)
 
-using ::v8::internal::Object;
-using ::v8::internal::PrintF;
-using ::v8::internal::OS;
-using ::v8::internal::ReadLine;
-using ::v8::internal::DeleteArray;
+namespace v8 {
+namespace internal {
 
 // Utility functions.
 bool HaveSameSign(int32_t a, int32_t b) {
-  return ((a ^ b) > 0);
+  return ((a ^ b) >= 0);
+}
+
+
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+  if (cc == 0) {
+    return 23;
+  } else {
+    return 24 + cc;
+  }
 }
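
A note on the HaveSameSign fix above: two int32 values share a sign exactly
when the sign bit of their XOR is clear, so the test must be >= 0; the old
> 0 misreported equal values (x ^ x == 0) as differently signed.
Illustrative checks:

    assert(HaveSameSign(5, 5));    // 5 ^ 5 == 0; the old "> 0" wrongly failed.
    assert(!HaveSameSign(5, -5));  // Sign bit set in 5 ^ -5.
    assert(HaveSameSign(-3, -7));  // Sign bit clear in -3 ^ -7.
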
 
 
@@ -63,15 +67,18 @@
 // Library does not provide vsscanf.
 #define SScanF sscanf  // NOLINT
 
-// The Debugger class is used by the simulator while debugging simulated MIPS
+// The MipsDebugger class is used by the simulator while debugging simulated
 // code.
-class Debugger {
+class MipsDebugger {
  public:
-  explicit Debugger(Simulator* sim);
-  ~Debugger();
+  explicit MipsDebugger(Simulator* sim);
+  ~MipsDebugger();
 
   void Stop(Instruction* instr);
   void Debug();
+  // Print all registers with a nice formatting.
+  void PrintAllRegs();
+  void PrintAllRegsIncludingFPU();
 
  private:
   // We set the breakpoint code to 0xfffff to easily recognize it.
@@ -81,6 +88,10 @@
   Simulator* sim_;
 
   int32_t GetRegisterValue(int regnum);
+  int32_t GetFPURegisterValueInt(int regnum);
+  int64_t GetFPURegisterValueLong(int regnum);
+  float GetFPURegisterValueFloat(int regnum);
+  double GetFPURegisterValueDouble(int regnum);
   bool GetValue(const char* desc, int32_t* value);
 
   // Set or delete a breakpoint. Returns true if successful.
@@ -91,18 +102,17 @@
   // execution to skip past breakpoints when run from the debugger.
   void UndoBreakpoints();
   void RedoBreakpoints();
-
-  // Print all registers with a nice formatting.
-  void PrintAllRegs();
 };
 
-Debugger::Debugger(Simulator* sim) {
+MipsDebugger::MipsDebugger(Simulator* sim) {
   sim_ = sim;
 }
 
-Debugger::~Debugger() {
+
+MipsDebugger::~MipsDebugger() {
 }
 
+
 #ifdef GENERATED_CODE_COVERAGE
 static FILE* coverage_log = NULL;
 
@@ -115,7 +125,7 @@
 }
 
 
-void Debugger::Stop(Instruction* instr) {
+void MipsDebugger::Stop(Instruction* instr) {
   UNIMPLEMENTED_MIPS();
   char* str = reinterpret_cast<char*>(instr->InstructionBits());
   if (strlen(str) > 0) {
@@ -125,9 +135,10 @@
     }
     instr->SetInstructionBits(0x0);  // Overwrite with nop.
   }
-  sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+  sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
 }
 
+
 #else  // ndef GENERATED_CODE_COVERAGE
 
 #define UNSUPPORTED() printf("Unsupported instruction.\n");
@@ -135,16 +146,16 @@
 static void InitializeCoverage() {}
 
 
-void Debugger::Stop(Instruction* instr) {
+void MipsDebugger::Stop(Instruction* instr) {
   const char* str = reinterpret_cast<char*>(instr->InstructionBits());
   PrintF("Simulator hit %s\n", str);
-  sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+  sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
   Debug();
 }
 #endif  // GENERATED_CODE_COVERAGE
 
 
-int32_t Debugger::GetRegisterValue(int regnum) {
+int32_t MipsDebugger::GetRegisterValue(int regnum) {
   if (regnum == kNumSimuRegisters) {
     return sim_->get_pc();
   } else {
@@ -153,11 +164,54 @@
 }
 
 
-bool Debugger::GetValue(const char* desc, int32_t* value) {
+int32_t MipsDebugger::GetFPURegisterValueInt(int regnum) {
+  if (regnum == kNumFPURegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_fpu_register(regnum);
+  }
+}
+
+
+int64_t MipsDebugger::GetFPURegisterValueLong(int regnum) {
+  if (regnum == kNumFPURegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_fpu_register_long(regnum);
+  }
+}
+
+
+float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
+  if (regnum == kNumFPURegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_fpu_register_float(regnum);
+  }
+}
+
+
+double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
+  if (regnum == kNumFPURegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_fpu_register_double(regnum);
+  }
+}
+
+
+bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
   int regnum = Registers::Number(desc);
+  int fpuregnum = FPURegisters::Number(desc);
+
   if (regnum != kInvalidRegister) {
     *value = GetRegisterValue(regnum);
     return true;
+  } else if (fpuregnum != kInvalidFPURegister) {
+    *value = GetFPURegisterValueInt(fpuregnum);
+    return true;
+  } else if (strncmp(desc, "0x", 2) == 0) {
+    return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
   } else {
     return SScanF(desc, "%i", value) == 1;
   }
@@ -165,7 +219,7 @@
 }
 
 
-bool Debugger::SetBreakpoint(Instruction* breakpc) {
+bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
   // Check if a breakpoint can be set. If not return without any side-effects.
   if (sim_->break_pc_ != NULL) {
     return false;
@@ -180,7 +234,7 @@
 }
 
 
-bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
+bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
   if (sim_->break_pc_ != NULL) {
     sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
   }
@@ -191,20 +245,21 @@
 }
 
 
-void Debugger::UndoBreakpoints() {
+void MipsDebugger::UndoBreakpoints() {
   if (sim_->break_pc_ != NULL) {
     sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
   }
 }
 
 
-void Debugger::RedoBreakpoints() {
+void MipsDebugger::RedoBreakpoints() {
   if (sim_->break_pc_ != NULL) {
     sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
   }
 }
 
-void Debugger::PrintAllRegs() {
+
+void MipsDebugger::PrintAllRegs() {
 #define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
 
   PrintF("\n");
@@ -237,10 +292,45 @@
   // pc
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(31), REG_INFO(34));
+
 #undef REG_INFO
+#undef FPU_REG_INFO
 }
 
-void Debugger::Debug() {
+
+void MipsDebugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
+        GetFPURegisterValueInt(n+1), \
+        GetFPURegisterValueInt(n), \
+        GetFPURegisterValueDouble(n)
+
+  PrintAllRegs();
+
+  PrintF("\n\n");
+  // f0, f1, f2, ... f31
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(6) );
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(8) );
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(10));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(12));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(14));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(16));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(18));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(20));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(22));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(24));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(26));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(28));
+  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(30));
+
+#undef REG_INFO
+#undef FPU_REG_INFO
+}
+
+
+void MipsDebugger::Debug() {
   intptr_t last_pc = -1;
   bool done = false;
 
@@ -253,6 +343,7 @@
   char cmd[COMMAND_SIZE + 1];
   char arg1[ARG_SIZE + 1];
   char arg2[ARG_SIZE + 1];
+  char* argv[3] = { cmd, arg1, arg2 };
 
   // Make sure to have a proper terminating character if we reach the limit.
   cmd[COMMAND_SIZE] = 0;
@@ -280,19 +371,21 @@
     } else {
       // Use sscanf to parse the individual parts of the command line. At the
       // moment no command expects more than two parameters.
-      int args = SScanF(line,
+      int argc = SScanF(line,
                         "%" XSTR(COMMAND_SIZE) "s "
                         "%" XSTR(ARG_SIZE) "s "
                         "%" XSTR(ARG_SIZE) "s",
                         cmd, arg1, arg2);
       if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
-        if (!(reinterpret_cast<Instruction*>(sim_->get_pc())->IsTrap())) {
+        Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+        if (!(instr->IsTrap()) ||
+            instr->InstructionBits() == rtCallRedirInstr) {
           sim_->InstructionDecode(
-                                reinterpret_cast<Instruction*>(sim_->get_pc()));
+              reinterpret_cast<Instruction*>(sim_->get_pc()));
         } else {
           // Allow si to jump over generated breakpoints.
           PrintF("/!\\ Jumping over generated breakpoint.\n");
-          sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+          sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
         }
       } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
         // Execute the one instruction we broke at with breakpoints disabled.
@@ -300,23 +393,65 @@
         // Leave the debugger shell.
         done = true;
       } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
-        if (args == 2) {
+        if (argc == 2) {
           int32_t value;
+          float fvalue;
           if (strcmp(arg1, "all") == 0) {
             PrintAllRegs();
+          } else if (strcmp(arg1, "allf") == 0) {
+            PrintAllRegsIncludingFPU();
           } else {
-            if (GetValue(arg1, &value)) {
+            int regnum = Registers::Number(arg1);
+            int fpuregnum = FPURegisters::Number(arg1);
+
+            if (regnum != kInvalidRegister) {
+              value = GetRegisterValue(regnum);
               PrintF("%s: 0x%08x %d \n", arg1, value, value);
+            } else if (fpuregnum != kInvalidFPURegister) {
+              if (fpuregnum % 2 == 1) {
+                value = GetFPURegisterValueInt(fpuregnum);
+                fvalue = GetFPURegisterValueFloat(fpuregnum);
+                PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+              } else {
+                double dfvalue;
+                int32_t lvalue1 = GetFPURegisterValueInt(fpuregnum);
+                int32_t lvalue2 = GetFPURegisterValueInt(fpuregnum + 1);
+                dfvalue = GetFPURegisterValueDouble(fpuregnum);
+                PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
+                       FPURegisters::Name(fpuregnum+1),
+                       FPURegisters::Name(fpuregnum),
+                       lvalue1,
+                       lvalue2,
+                       dfvalue);
+              }
             } else {
               PrintF("%s unrecognized\n", arg1);
             }
           }
         } else {
-          PrintF("print <register>\n");
+          if (argc == 3) {
+            if (strcmp(arg2, "single") == 0) {
+              int32_t value;
+              float fvalue;
+              int fpuregnum = FPURegisters::Number(arg1);
+
+              if (fpuregnum != kInvalidFPURegister) {
+                value = GetFPURegisterValueInt(fpuregnum);
+                fvalue = GetFPURegisterValueFloat(fpuregnum);
+                PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+              } else {
+                PrintF("%s unrecognized\n", arg1);
+              }
+            } else {
+              PrintF("print <fpu register> single\n");
+            }
+          } else {
+            PrintF("print <register> or print <fpu register> single\n");
+          }
         }
       } else if ((strcmp(cmd, "po") == 0)
                  || (strcmp(cmd, "printobject") == 0)) {
-        if (args == 2) {
+        if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
             Object* obj = reinterpret_cast<Object*>(value);
@@ -333,6 +468,39 @@
         } else {
           PrintF("printobject <value>\n");
         }
+      } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+        int32_t* cur = NULL;
+        int32_t* end = NULL;
+        int next_arg = 1;
+
+        if (strcmp(cmd, "stack") == 0) {
+          cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
+        } else {  // "mem"
+          int32_t value;
+          if (!GetValue(arg1, &value)) {
+            PrintF("%s unrecognized\n", arg1);
+            continue;
+          }
+          cur = reinterpret_cast<int32_t*>(value);
+          next_arg++;
+        }
+
+        // Default to dumping 10 words; an optional count argument overrides
+        // it. Initializing here also avoids reading an uninitialized value
+        // when extra arguments are supplied.
+        int32_t words = 10;
+        if (argc == next_arg + 1) {
+          if (!GetValue(argv[next_arg], &words)) {
+            words = 10;
+          }
+        }
+        end = cur + words;
+
+        while (cur < end) {
+          PrintF("  0x%08x:  0x%08x %10d\n",
+                 reinterpret_cast<intptr_t>(cur), *cur, *cur);
+          cur++;
+        }
+
       } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
         disasm::NameConverter converter;
         disasm::Disassembler dasm(converter);
@@ -342,36 +510,37 @@
         byte_* cur = NULL;
         byte_* end = NULL;
 
-        if (args == 1) {
+        if (argc == 1) {
           cur = reinterpret_cast<byte_*>(sim_->get_pc());
-          end = cur + (10 * Instruction::kInstructionSize);
-        } else if (args == 2) {
+          end = cur + (10 * Instruction::kInstrSize);
+        } else if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
             cur = reinterpret_cast<byte_*>(value);
             // no length parameter passed, assume 10 instructions
-            end = cur + (10 * Instruction::kInstructionSize);
+            end = cur + (10 * Instruction::kInstrSize);
           }
         } else {
           int32_t value1;
           int32_t value2;
           if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
             cur = reinterpret_cast<byte_*>(value1);
-            end = cur + (value2 * Instruction::kInstructionSize);
+            end = cur + (value2 * Instruction::kInstrSize);
           }
         }
 
         while (cur < end) {
           dasm.InstructionDecode(buffer, cur);
-          PrintF("  0x%08x  %s\n", cur, buffer.start());
-          cur += Instruction::kInstructionSize;
+          PrintF("  0x%08x  %s\n",
+              reinterpret_cast<intptr_t>(cur), buffer.start());
+          cur += Instruction::kInstrSize;
         }
       } else if (strcmp(cmd, "gdb") == 0) {
         PrintF("relinquishing control to gdb\n");
         v8::internal::OS::DebugBreak();
         PrintF("regaining control from gdb\n");
       } else if (strcmp(cmd, "break") == 0) {
-        if (args == 2) {
+        if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
             if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
@@ -404,29 +573,30 @@
         byte_* cur = NULL;
         byte_* end = NULL;
 
-        if (args == 1) {
+        if (argc == 1) {
           cur = reinterpret_cast<byte_*>(sim_->get_pc());
-          end = cur + (10 * Instruction::kInstructionSize);
-        } else if (args == 2) {
+          end = cur + (10 * Instruction::kInstrSize);
+        } else if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
             cur = reinterpret_cast<byte_*>(value);
             // no length parameter passed, assume 10 instructions
-            end = cur + (10 * Instruction::kInstructionSize);
+            end = cur + (10 * Instruction::kInstrSize);
           }
         } else {
           int32_t value1;
           int32_t value2;
           if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
             cur = reinterpret_cast<byte_*>(value1);
-            end = cur + (value2 * Instruction::kInstructionSize);
+            end = cur + (value2 * Instruction::kInstrSize);
           }
         }
 
         while (cur < end) {
           dasm.InstructionDecode(buffer, cur);
-          PrintF("  0x%08x  %s\n", cur, buffer.start());
-          cur += Instruction::kInstructionSize;
+          PrintF("  0x%08x  %s\n",
+                 reinterpret_cast<intptr_t>(cur), buffer.start());
+          cur += Instruction::kInstrSize;
         }
       } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
         PrintF("cont\n");
@@ -438,6 +608,10 @@
         PrintF("  use register name 'all' to print all registers\n");
         PrintF("printobject <register>\n");
         PrintF("  print an object from a register (alias 'po')\n");
+        PrintF("stack [<words>]\n");
+        PrintF("  dump stack content, default dump 10 words)\n");
+        PrintF("mem <address> [<words>]\n");
+        PrintF("  dump memory content, default dump 10 words)\n");
         PrintF("flags\n");
         PrintF("  print flags\n");
         PrintF("disasm [<instructions>]\n");
@@ -471,29 +645,120 @@
 }
 
 
-// Create one simulator per thread and keep it in thread local storage.
-static v8::internal::Thread::LocalStorageKey simulator_key;
+static bool ICacheMatch(void* one, void* two) {
+  ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+  ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+  return one == two;
+}
 
 
-bool Simulator::initialized_ = false;
+static uint32_t ICacheHash(void* key) {
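+  // Keys are aligned addresses, so their low bits carry no information;
+  // shift them out to spread the hash values (as in the ARM simulator).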
+  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+  intptr_t start_page = (start & ~CachePage::kPageMask);
+  intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+  return start_page == end_page;
+}
+
+
+void Simulator::FlushICache(v8::internal::HashMap* i_cache,
+                            void* start_addr,
+                            size_t size) {
+  intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+  int intra_line = (start & CachePage::kLineMask);
+  start -= intra_line;
+  size += intra_line;
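+  // Round the flush size up to a whole number of cache lines.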
+  size = ((size - 1) | CachePage::kLineMask) + 1;
+  int offset = (start & CachePage::kPageMask);
+  while (!AllOnOnePage(start, size - 1)) {
+    int bytes_to_flush = CachePage::kPageSize - offset;
+    FlushOnePage(i_cache, start, bytes_to_flush);
+    start += bytes_to_flush;
+    size -= bytes_to_flush;
+    ASSERT_EQ(0, start & CachePage::kPageMask);
+    offset = 0;
+  }
+  if (size != 0) {
+    FlushOnePage(i_cache, start, size);
+  }
+}
+
+
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+  v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
+                                                         ICacheHash(page),
+                                                         true);
+  if (entry->value == NULL) {
+    CachePage* new_page = new CachePage();
+    entry->value = new_page;
+  }
+  return reinterpret_cast<CachePage*>(entry->value);
+}
+
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
+                             intptr_t start,
+                             int size) {
+  ASSERT(size <= CachePage::kPageSize);
+  ASSERT(AllOnOnePage(start, size - 1));
+  ASSERT((start & CachePage::kLineMask) == 0);
+  ASSERT((size & CachePage::kLineMask) == 0);
+  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+  int offset = (start & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(i_cache, page);
+  char* valid_bytemap = cache_page->ValidityByte(offset);
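+  // One validity byte tracks each cache line; mark the affected lines
+  // invalid so they are re-fetched from memory on their next execution.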
+  memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+                            Instruction* instr) {
+  intptr_t address = reinterpret_cast<intptr_t>(instr);
+  void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+  void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+  int offset = (address & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(i_cache, page);
+  char* cache_valid_byte = cache_page->ValidityByte(offset);
+  bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+  char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+  if (cache_hit) {
+    // Check that the data in memory matches the contents of the I-cache.
+    CHECK(memcmp(reinterpret_cast<void*>(instr),
+                 cache_page->CachedData(offset),
+                 Instruction::kInstrSize) == 0);
+  } else {
+    // Cache miss.  Load memory into the cache.
+    memcpy(cached_line, line, CachePage::kLineLength);
+    *cache_valid_byte = CachePage::LINE_VALID;
+  }
+}
 
 
 void Simulator::Initialize() {
-  if (initialized_) return;
-  simulator_key = v8::internal::Thread::CreateThreadLocalKey();
-  initialized_ = true;
+  if (Isolate::Current()->simulator_initialized()) return;
+  Isolate::Current()->set_simulator_initialized(true);
   ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
 }
 
 
-Simulator::Simulator() {
+Simulator::Simulator() : isolate_(Isolate::Current()) {
+  i_cache_ = isolate_->simulator_i_cache();
+  if (i_cache_ == NULL) {
+    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    isolate_->set_simulator_i_cache(i_cache_);
+  }
   Initialize();
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
-  size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
-  stack_ = reinterpret_cast<char*>(malloc(stack_size));
+  stack_size_ = 1 * 1024 * 1024;  // Allocate 1MB for the stack.
+  stack_ = reinterpret_cast<char*>(malloc(stack_size_));
   pc_modified_ = false;
   icount_ = 0;
+  break_count_ = 0;
   break_pc_ = NULL;
   break_instr_ = 0;
 
@@ -502,16 +767,23 @@
   for (int i = 0; i < kNumSimuRegisters; i++) {
     registers_[i] = 0;
   }
+  for (int i = 0; i < kNumFPURegisters; i++) {
+    FPUregisters_[i] = 0;
+  }
+  FCSR_ = 0;
 
   // The sp is initialized to point to the bottom (high address) of the
   // allocated stack area. To be safe in potential stack underflows we leave
   // some buffer below.
-  registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+  registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size_ - 64;
   // The ra and pc are initialized to a known bad value that will cause an
   // access violation if the simulator ever tries to execute it.
   registers_[pc] = bad_ra;
   registers_[ra] = bad_ra;
   InitializeCoverage();
+  for (int i = 0; i < kNumExceptions; i++) {
+    exceptions[i] = 0;
+  }
 }
 
 
@@ -524,12 +796,18 @@
 // offset from the swi instruction so the simulator knows what to call.
 class Redirection {
  public:
-  Redirection(void* external_function, bool fp_return)
+  Redirection(void* external_function, ExternalReference::Type type)
       : external_function_(external_function),
         swi_instruction_(rtCallRedirInstr),
-        fp_return_(fp_return),
-        next_(list_) {
-    list_ = this;
+        type_(type),
+        next_(NULL) {
+    Isolate* isolate = Isolate::Current();
+    next_ = isolate->simulator_redirection();
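+    // The swi instruction is embedded in this freshly allocated object;
+    // invalidate any stale simulator i-cache line covering its address.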
+    Simulator::current(isolate)->
+        FlushICache(isolate->simulator_i_cache(),
+                    reinterpret_cast<void*>(&swi_instruction_),
+                    Instruction::kInstrSize);
+    isolate->set_simulator_redirection(this);
   }
 
   void* address_of_swi_instruction() {
@@ -537,14 +815,16 @@
   }
 
   void* external_function() { return external_function_; }
-  bool fp_return() { return fp_return_; }
+  ExternalReference::Type type() { return type_; }
 
-  static Redirection* Get(void* external_function, bool fp_return) {
-    Redirection* current;
-    for (current = list_; current != NULL; current = current->next_) {
+  static Redirection* Get(void* external_function,
+                          ExternalReference::Type type) {
+    Isolate* isolate = Isolate::Current();
+    Redirection* current = isolate->simulator_redirection();
+    for (; current != NULL; current = current->next_) {
       if (current->external_function_ == external_function) return current;
     }
-    return new Redirection(external_function, fp_return);
+    return new Redirection(external_function, type);
   }
 
   static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -557,31 +837,33 @@
  private:
   void* external_function_;
   uint32_t swi_instruction_;
-  bool fp_return_;
+  ExternalReference::Type type_;
   Redirection* next_;
-  static Redirection* list_;
 };
 
 
-Redirection* Redirection::list_ = NULL;
-
-
 void* Simulator::RedirectExternalReference(void* external_function,
-                                           bool fp_return) {
-  Redirection* redirection = Redirection::Get(external_function, fp_return);
+                                           ExternalReference::Type type) {
+  Redirection* redirection = Redirection::Get(external_function, type);
   return redirection->address_of_swi_instruction();
 }
 
 
 // Get the active Simulator for the current thread.
-Simulator* Simulator::current() {
-  Initialize();
-  Simulator* sim = reinterpret_cast<Simulator*>(
-      v8::internal::Thread::GetThreadLocal(simulator_key));
+Simulator* Simulator::current(Isolate* isolate) {
+  v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+      Isolate::CurrentPerIsolateThreadData();
+  if (isolate_data == NULL) {
+    Isolate::EnterDefaultIsolate();
+    isolate_data = Isolate::CurrentPerIsolateThreadData();
+  }
+  ASSERT(isolate_data != NULL);
+
+  Simulator* sim = isolate_data->simulator();
   if (sim == NULL) {
-    // TODO(146): delete the simulator object when a thread goes away.
+    // TODO(146): delete the simulator object when a thread/isolate goes away.
     sim = new Simulator();
-    v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+    isolate_data->set_simulator(sim);
   }
   return sim;
 }
@@ -599,14 +881,22 @@
   registers_[reg] = (reg == 0) ? 0 : value;
 }
 
+
 void Simulator::set_fpu_register(int fpureg, int32_t value) {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
   FPUregisters_[fpureg] = value;
 }
 
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+
 void Simulator::set_fpu_register_double(int fpureg, double value) {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  *v8i::BitCast<double*>(&FPUregisters_[fpureg]) = value;
+  *BitCast<double*>(&FPUregisters_[fpureg]) = value;
 }
 
 
@@ -620,22 +910,75 @@
     return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
 }
 
+
 int32_t Simulator::get_fpu_register(int fpureg) const {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
   return FPUregisters_[fpureg];
 }
 
+
+int64_t Simulator::get_fpu_register_long(int fpureg) const {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+  return *BitCast<int64_t*>(
+      const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return *BitCast<float*>(
+      const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+
 double Simulator::get_fpu_register_double(int fpureg) const {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  return *v8i::BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+  return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
 }
 
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
+  if (value) {
+    FCSR_ |= (1 << cc);
+  } else {
+    FCSR_ &= ~(1 << cc);
+  }
+}
+
+
+bool Simulator::test_fcsr_bit(uint32_t cc) {
+  return FCSR_ & (1 << cc);
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(double original, double rounded) {
+  if (!isfinite(original) ||
+      rounded > LONG_MAX ||
+      rounded < LONG_MIN) {
+    set_fcsr_bit(6, true);  // Invalid operation.
+    return true;
+  } else if (original != static_cast<double>(rounded)) {
+    set_fcsr_bit(2, true);  // Inexact.
+  }
+  return false;
+}
+
+
 // Raw access to the PC register.
 void Simulator::set_pc(int32_t value) {
   pc_modified_ = true;
   registers_[pc] = value;
 }
 
+
+bool Simulator::has_bad_pc() const {
+  return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+
 // Raw access to the PC register without the special adjustment when reading.
 int32_t Simulator::get_pc() const {
   return registers_[pc];
@@ -651,24 +994,38 @@
 // get the correct MIPS-like behaviour on unaligned accesses.
 
 int Simulator::ReadW(int32_t addr, Instruction* instr) {
-  if ((addr & v8i::kPointerAlignmentMask) == 0) {
+  if (addr >= 0 && addr < 0x400) {
+    // This has to be a NULL-dereference.
+    MipsDebugger dbg(this);
+    dbg.Debug();
+  }
+  if ((addr & kPointerAlignmentMask) == 0) {
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
-  OS::Abort();
+  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr,
+      reinterpret_cast<void*>(instr));
+  MipsDebugger dbg(this);
+  dbg.Debug();
   return 0;
 }
 
 
 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
-  if ((addr & v8i::kPointerAlignmentMask) == 0) {
+  if (addr >= 0 && addr < 0x400) {
+    // This has to be a NULL-dereference.
+    MipsDebugger dbg(this);
+    dbg.Debug();
+  }
+  if ((addr & kPointerAlignmentMask) == 0) {
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
-  OS::Abort();
+  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr,
+      reinterpret_cast<void*>(instr));
+  MipsDebugger dbg(this);
+  dbg.Debug();
 }
 
 
@@ -677,7 +1034,8 @@
     double* ptr = reinterpret_cast<double*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned (double) read at 0x%08x, pc=%p\n", addr,
+      reinterpret_cast<void*>(instr));
   OS::Abort();
   return 0;
 }
@@ -689,7 +1047,8 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned (double) write at 0x%08x, pc=%p\n", addr,
+      reinterpret_cast<void*>(instr));
   OS::Abort();
 }
 
@@ -699,7 +1058,8 @@
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr,
+      reinterpret_cast<void*>(instr));
   OS::Abort();
   return 0;
 }
@@ -710,7 +1070,8 @@
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr,
+      reinterpret_cast<void*>(instr));
   OS::Abort();
   return 0;
 }
@@ -722,7 +1083,8 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr,
+      reinterpret_cast<void*>(instr));
   OS::Abort();
 }
 
@@ -733,7 +1095,8 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr,
+      reinterpret_cast<void*>(instr));
   OS::Abort();
 }
 
@@ -746,7 +1109,7 @@
 
 int32_t Simulator::ReadB(int32_t addr) {
   int8_t* ptr = reinterpret_cast<int8_t*>(addr);
-  return ((*ptr << 24) >> 24) & 0xff;
+  return *ptr;
 }
 
 
@@ -773,7 +1136,7 @@
 // Unsupported instructions use Format to print an error and stop execution.
 void Simulator::Format(Instruction* instr, const char* format) {
   PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
-         instr, format);
+         reinterpret_cast<intptr_t>(instr), format);
   UNIMPLEMENTED_MIPS();
 }
 
@@ -782,75 +1145,140 @@
 // Note: To be able to return two values from some calls the code in runtime.cc
 // uses the ObjectPair which is essentially two 32-bit values stuffed into a
 // 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the r1 result register contains a bogus
+// 64 bits of result. If they don't, the v1 result register contains a bogus
 // value, which is fine because it is caller-saved.
 typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
                                         int32_t arg1,
                                         int32_t arg2,
-                                        int32_t arg3);
-typedef double (*SimulatorRuntimeFPCall)(double fparg0,
-                                         double fparg1);
-
+                                        int32_t arg3,
+                                        int32_t arg4,
+                                        int32_t arg5);
+typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
+                                         int32_t arg1,
+                                         int32_t arg2,
+                                         int32_t arg3);
 
 // Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime.
+// C-based V8 runtime. They are also used for debugging with simulator.
 void Simulator::SoftwareInterrupt(Instruction* instr) {
+  // There are several instructions that could get us here: the break_
+  // instruction and several variants of traps. All are "SPECIAL" class
+  // opcodes, distinguished by the function field.
+  int32_t func = instr->FunctionFieldRaw();
+  int32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+
   // We first check if we met a call_rt_redirected.
   if (instr->InstructionBits() == rtCallRedirInstr) {
+    // Check if the stack is aligned. A misalignment error is reported
+    // below, so that it can include information on the function called.
+    bool stack_aligned =
+        (get_register(sp)
+         & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
     Redirection* redirection = Redirection::FromSwiInstruction(instr);
     int32_t arg0 = get_register(a0);
     int32_t arg1 = get_register(a1);
     int32_t arg2 = get_register(a2);
     int32_t arg3 = get_register(a3);
-    // fp args are (not always) in f12 and f14.
-    // See MIPS conventions for more details.
-    double fparg0 = get_fpu_register_double(f12);
-    double fparg1 = get_fpu_register_double(f14);
+    int32_t arg4 = 0;
+    int32_t arg5 = 0;
+
+    // Need to check if sp is valid before assigning arg4, arg5.
+    // This is a fix for cctest test-api/CatchStackOverflow which causes
+    // the stack to overflow. For some reason the ARM port does not need
+    // this stack check here.
+    int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
+    int32_t* stack = reinterpret_cast<int32_t*>(stack_);
+    if (stack_pointer >= stack &&
+        stack_pointer < reinterpret_cast<int32_t*>(stack_ + stack_size_)) {
+      arg4 = stack_pointer[0];
+      arg5 = stack_pointer[1];
+    }
     // This is dodgy but it works because the C entry stubs are never moved.
     // See comment in codegen-arm.cc and bug 1242173.
     int32_t saved_ra = get_register(ra);
-    if (redirection->fp_return()) {
-      intptr_t external =
-          reinterpret_cast<intptr_t>(redirection->external_function());
+
+    intptr_t external =
+        reinterpret_cast<int32_t>(redirection->external_function());
+
+    // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
+    // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
+    // simulator. Soft-float has additional abstraction of ExternalReference,
+    // to support serialization. Finally, when simulated on x86 host, the
+    // x86 softfloat routines are used, and this Redirection infrastructure
+    // lets simulated-mips make calls into x86 C code.
+    // When doing that, the 'double' return type must be handled differently
+    // than the usual int64_t return. The data is returned in different
+    // registers and cannot be cast from one type to the other. However, the
+    // calling arguments are passed the same way in both cases.
+    if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
       SimulatorRuntimeFPCall target =
           reinterpret_cast<SimulatorRuntimeFPCall>(external);
-      if (::v8::internal::FLAG_trace_sim) {
-        PrintF("Call to host function at %p with args %f, %f\n",
-               FUNCTION_ADDR(target), fparg0, fparg1);
+      if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+        PrintF("Call to host function at %p with args %08x:%08x %08x:%08x",
+               FUNCTION_ADDR(target), arg0, arg1, arg2, arg3);
+        if (!stack_aligned) {
+          PrintF(" with unaligned stack %08x\n", get_register(sp));
+        }
+        PrintF("\n");
       }
-      double result = target(fparg0, fparg1);
-      set_fpu_register_double(f0, result);
+      double result = target(arg0, arg1, arg2, arg3);
+      // fp result -> registers v0 and v1.
+      int32_t gpreg_pair[2];
+      memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
+      set_register(v0, gpreg_pair[0]);
+      set_register(v1, gpreg_pair[1]);
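+      // On a little-endian host the low word of the double lands in v0
+      // and the high word in v1, matching the o32 soft-float convention.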
+    } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+      PrintF("Mips does not yet support ExternalReference::DIRECT_API_CALL\n");
+      ASSERT(redirection->type() != ExternalReference::DIRECT_API_CALL);
+    } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+      PrintF("Mips does not support ExternalReference::DIRECT_GETTER_CALL\n");
+      ASSERT(redirection->type() != ExternalReference::DIRECT_GETTER_CALL);
     } else {
-      intptr_t external =
-          reinterpret_cast<int32_t>(redirection->external_function());
+      // Builtin call.
+      ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
       SimulatorRuntimeCall target =
           reinterpret_cast<SimulatorRuntimeCall>(external);
-      if (::v8::internal::FLAG_trace_sim) {
+      if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
         PrintF(
-            "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+            "Call to host function at %p: %08x, %08x, %08x, %08x, %08x, %08x",
             FUNCTION_ADDR(target),
             arg0,
             arg1,
             arg2,
-            arg3);
+            arg3,
+            arg4,
+            arg5);
+        if (!stack_aligned) {
+          PrintF(" with unaligned stack %08x\n", get_register(sp));
+        }
+        PrintF("\n");
       }
-      int64_t result = target(arg0, arg1, arg2, arg3);
-      int32_t lo_res = static_cast<int32_t>(result);
-      int32_t hi_res = static_cast<int32_t>(result >> 32);
-      if (::v8::internal::FLAG_trace_sim) {
-        PrintF("Returned %08x\n", lo_res);
-      }
-      set_register(v0, lo_res);
-      set_register(v1, hi_res);
+
+      int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+      set_register(v0, static_cast<int32_t>(result));
+      set_register(v1, static_cast<int32_t>(result >> 32));
+    }
+    if (::v8::internal::FLAG_trace_sim) {
+      PrintF("Returned %08x : %08x\n", get_register(v1), get_register(v0));
     }
     set_register(ra, saved_ra);
     set_pc(get_register(ra));
+
+  } else if (func == BREAK && code >= 0 && code < 16) {
+    // First 16 break_ codes interpreted as debug markers.
+    MipsDebugger dbg(this);
+    ++break_count_;
+    PrintF("\n---- break %d marker: %3d  (instr count: %8d) ----------"
+           "----------------------------------",
+           code, break_count_, icount_);
+    dbg.PrintAllRegs();  // Print registers and continue running.
   } else {
-    Debugger dbg(this);
+    // All remaining break_ codes, and all traps are handled here.
+    MipsDebugger dbg(this);
     dbg.Debug();
   }
 }
 
+
 void Simulator::SignalExceptions() {
   for (int i = 1; i < kNumExceptions; i++) {
     if (exceptions[i] != 0) {
@@ -859,51 +1287,52 @@
   }
 }
 
+
 // Handle execution based on instruction types.
-void Simulator::DecodeTypeRegister(Instruction* instr) {
-  // Instruction fields
-  Opcode   op     = instr->OpcodeFieldRaw();
-  int32_t  rs_reg = instr->RsField();
-  int32_t  rs     = get_register(rs_reg);
-  uint32_t rs_u   = static_cast<uint32_t>(rs);
-  int32_t  rt_reg = instr->RtField();
-  int32_t  rt     = get_register(rt_reg);
-  uint32_t rt_u   = static_cast<uint32_t>(rt);
-  int32_t  rd_reg = instr->RdField();
-  uint32_t sa     = instr->SaField();
 
-  int32_t fs_reg= instr->FsField();
+void Simulator::ConfigureTypeRegister(Instruction* instr,
+                                      int32_t& alu_out,
+                                      int64_t& i64hilo,
+                                      uint64_t& u64hilo,
+                                      int32_t& next_pc,
+                                      bool& do_interrupt) {
+  // Every local variable declared here needs to be const, so that any
+  // value that must reach DecodeTypeRegister goes through the reference
+  // arguments above.
 
-  // ALU output
-  // It should not be used as is. Instructions using it should always initialize
-  // it first.
-  int32_t alu_out = 0x12345678;
-  // Output or temporary for floating point.
-  double fp_out = 0.0;
+  // Instruction fields.
+  const Opcode   op     = instr->OpcodeFieldRaw();
+  const int32_t  rs_reg = instr->RsValue();
+  const int32_t  rs     = get_register(rs_reg);
+  const uint32_t rs_u   = static_cast<uint32_t>(rs);
+  const int32_t  rt_reg = instr->RtValue();
+  const int32_t  rt     = get_register(rt_reg);
+  const uint32_t rt_u   = static_cast<uint32_t>(rt);
+  const int32_t  rd_reg = instr->RdValue();
+  const uint32_t sa     = instr->SaValue();
 
-  // For break and trap instructions.
-  bool do_interrupt = false;
+  const int32_t  fs_reg = instr->FsValue();
 
-  // For jr and jalr
-  // Get current pc.
-  int32_t current_pc = get_pc();
-  // Next pc
-  int32_t next_pc = 0;
 
   // ---------- Configuration
   switch (op) {
     case COP1:    // Coprocessor instructions
       switch (instr->RsFieldRaw()) {
-        case BC1:   // branch on coprocessor condition
+        case BC1:   // Handled in DecodeTypeImmediate; should never come here.
           UNREACHABLE();
           break;
+        case CFC1:
+          // At the moment only FCSR is supported.
+          ASSERT(fs_reg == kFCSRRegister);
+          alu_out = FCSR_;
+          break;
         case MFC1:
           alu_out = get_fpu_register(fs_reg);
           break;
         case MFHC1:
-          fp_out = get_fpu_register_double(fs_reg);
-          alu_out = *v8i::BitCast<int32_t*>(&fp_out);
+          UNIMPLEMENTED_MIPS();
           break;
+        case CTC1:
         case MTC1:
         case MTHC1:
           // Do the store in the execution step.
@@ -923,13 +1352,22 @@
       switch (instr->FunctionFieldRaw()) {
         case JR:
         case JALR:
-          next_pc = get_register(instr->RsField());
+          next_pc = get_register(instr->RsValue());
           break;
         case SLL:
           alu_out = rt << sa;
           break;
         case SRL:
-          alu_out = rt_u >> sa;
+          if (rs_reg == 0) {
+            // Regular logical right shift of a word by a fixed number of
+            // bits. The RS field is always equal to 0.
+            alu_out = rt_u >> sa;
+          } else {
+            // Logical right-rotate of a word by a fixed number of bits.
+            // This is a special case of the SRL instruction, added in
+            // MIPS32 Release 2. The RS field is equal to 00001.
+            alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+          }
           break;
         case SRA:
           alu_out = rt >> sa;
@@ -938,7 +1376,16 @@
           alu_out = rt << rs;
           break;
         case SRLV:
-          alu_out = rt_u >> rs;
+          if (sa == 0) {
+            // Regular logical right-shift of a word by a variable number
+            // of bits. The SA field is always equal to 0.
+            alu_out = rt_u >> rs;
+          } else {
+            // Logical right-rotate of a word by a variable number of bits.
+            // This is a special case of the SRLV instruction, added in
+            // MIPS32 Release 2. The SA field is equal to 00001.
+            alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+          }
           break;
         case SRAV:
           alu_out = rt >> rs;
@@ -950,10 +1397,10 @@
           alu_out = get_register(LO);
           break;
         case MULT:
-          UNIMPLEMENTED_MIPS();
+          i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
           break;
         case MULTU:
-          UNIMPLEMENTED_MIPS();
+          u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
           break;
         case DIV:
         case DIVU:
@@ -1005,6 +1452,7 @@
           break;
         // Break and trap instructions
         case BREAK:
           do_interrupt = true;
           break;
         case TGE:
@@ -1025,6 +1473,11 @@
         case TNE:
           do_interrupt = rs != rt;
           break;
+        case MOVN:
+        case MOVZ:
+        case MOVCI:
+          // No action taken on decode.
+          break;
         default:
           UNREACHABLE();
       };
@@ -1034,13 +1487,83 @@
         case MUL:
           alu_out = rs_u * rt_u;  // Only the lower 32 bits are kept.
           break;
+        case CLZ:
+          // CLZ of zero is architecturally 32; __builtin_clz(0) is undefined.
+          alu_out = rs_u ? __builtin_clz(rs_u) : 32;
+          break;
         default:
           UNREACHABLE();
-      }
+      };
+      break;
+    case SPECIAL3:
+      switch (instr->FunctionFieldRaw()) {
+        case INS: {   // Mips32r2 instruction.
+          // Interpret Rd field as 5-bit msb of insert.
+          uint16_t msb = rd_reg;
+          // Interpret sa field as 5-bit lsb of insert.
+          uint16_t lsb = sa;
+          uint16_t size = msb - lsb + 1;
+          uint32_t mask = (1 << size) - 1;
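+          // E.g. msb == 7, lsb == 0 copies the low byte of rs into the
+          // low byte of rt.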
+          alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+          break;
+        }
+        case EXT: {   // Mips32r2 instruction.
+          // Interpret Rd field as 5-bit msb of extract.
+          uint16_t msb = rd_reg;
+          // Interpret sa field as 5-bit lsb of extract.
+          uint16_t lsb = sa;
+          uint16_t size = msb + 1;
+          uint32_t mask = (1 << size) - 1;
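+          // E.g. msb == 7, lsb == 8 extracts bits 15..8 of rs into the
+          // low byte of the result.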
+          alu_out = (rs_u & (mask << lsb)) >> lsb;
+          break;
+        }
+        default:
+          UNREACHABLE();
+      };
       break;
     default:
       UNREACHABLE();
   };
+}
+
+
+void Simulator::DecodeTypeRegister(Instruction* instr) {
+  // Instruction fields.
+  const Opcode   op     = instr->OpcodeFieldRaw();
+  const int32_t  rs_reg = instr->RsValue();
+  const int32_t  rs     = get_register(rs_reg);
+  const uint32_t rs_u   = static_cast<uint32_t>(rs);
+  const int32_t  rt_reg = instr->RtValue();
+  const int32_t  rt     = get_register(rt_reg);
+  const uint32_t rt_u   = static_cast<uint32_t>(rt);
+  const int32_t  rd_reg = instr->RdValue();
+
+  const int32_t  fs_reg = instr->FsValue();
+  const int32_t  ft_reg = instr->FtValue();
+  const int32_t  fd_reg = instr->FdValue();
+  int64_t  i64hilo = 0;
+  uint64_t u64hilo = 0;
+
+  // ALU output.
+  // It should not be used as-is; instructions using it should always
+  // initialize it first.
+  int32_t alu_out = 0x12345678;
+
+  // For break and trap instructions.
+  bool do_interrupt = false;
+
+  // For jr and jalr
+  // Get current pc.
+  int32_t current_pc = get_pc();
+  // Next pc
+  int32_t next_pc = 0;
+
+  // Setup the variables if needed before executing the instruction.
+  ConfigureTypeRegister(instr,
+                        alu_out,
+                        i64hilo,
+                        u64hilo,
+                        next_pc,
+                        do_interrupt);
 
   // ---------- Raise exceptions triggered.
   SignalExceptions();
@@ -1052,25 +1575,42 @@
         case BC1:   // branch on coprocessor condition
           UNREACHABLE();
           break;
+        case CFC1:
+          set_register(rt_reg, alu_out);
+          break;
         case MFC1:
-        case MFHC1:
           set_register(rt_reg, alu_out);
           break;
+        case MFHC1:
+          UNIMPLEMENTED_MIPS();
+          break;
+        case CTC1:
+          // At the moment only FCSR is supported.
+          ASSERT(fs_reg == kFCSRRegister);
+          FCSR_ = registers_[rt_reg];
+          break;
         case MTC1:
-          // We don't need to set the higher bits to 0, because MIPS ISA says
-          // they are in an unpredictable state after executing MTC1.
           FPUregisters_[fs_reg] = registers_[rt_reg];
-          FPUregisters_[fs_reg+1] = Unpredictable;
           break;
         case MTHC1:
-          // Here we need to keep the lower bits unchanged.
-          FPUregisters_[fs_reg+1] = registers_[rt_reg];
+          UNIMPLEMENTED_MIPS();
           break;
         case S:
+          float f;
           switch (instr->FunctionFieldRaw()) {
             case CVT_D_S:
+              f = get_fpu_register_float(fs_reg);
+              set_fpu_register_double(fd_reg, static_cast<double>(f));
+              break;
             case CVT_W_S:
             case CVT_L_S:
+            case TRUNC_W_S:
+            case TRUNC_L_S:
+            case ROUND_W_S:
+            case ROUND_L_S:
+            case FLOOR_W_S:
+            case FLOOR_L_S:
+            case CEIL_W_S:
+            case CEIL_L_S:
             case CVT_PS_S:
               UNIMPLEMENTED_MIPS();
               break;
@@ -1079,10 +1619,133 @@
           }
           break;
         case D:
+          double ft, fs;
+          uint32_t cc, fcsr_cc;
+          int64_t  i64;
+          fs = get_fpu_register_double(fs_reg);
+          ft = get_fpu_register_double(ft_reg);
+          cc = instr->FCccValue();
+          fcsr_cc = get_fcsr_condition_bit(cc);
           switch (instr->FunctionFieldRaw()) {
-            case CVT_S_D:
-            case CVT_W_D:
-            case CVT_L_D:
+            case ADD_D:
+              set_fpu_register_double(fd_reg, fs + ft);
+              break;
+            case SUB_D:
+              set_fpu_register_double(fd_reg, fs - ft);
+              break;
+            case MUL_D:
+              set_fpu_register_double(fd_reg, fs * ft);
+              break;
+            case DIV_D:
+              set_fpu_register_double(fd_reg, fs / ft);
+              break;
+            case ABS_D:
+              set_fpu_register_double(fd_reg, fs < 0 ? -fs : fs);
+              break;
+            case MOV_D:
+              set_fpu_register_double(fd_reg, fs);
+              break;
+            case NEG_D:
+              set_fpu_register_double(fd_reg, -fs);
+              break;
+            case SQRT_D:
+              set_fpu_register_double(fd_reg, sqrt(fs));
+              break;
+            case C_UN_D:
+              set_fcsr_bit(fcsr_cc, isnan(fs) || isnan(ft));
+              break;
+            case C_EQ_D:
+              set_fcsr_bit(fcsr_cc, (fs == ft));
+              break;
+            case C_UEQ_D:
+              set_fcsr_bit(fcsr_cc, (fs == ft) || (isnan(fs) || isnan(ft)));
+              break;
+            case C_OLT_D:
+              set_fcsr_bit(fcsr_cc, (fs < ft));
+              break;
+            case C_ULT_D:
+              set_fcsr_bit(fcsr_cc, (fs < ft) || (isnan(fs) || isnan(ft)));
+              break;
+            case C_OLE_D:
+              set_fcsr_bit(fcsr_cc, (fs <= ft));
+              break;
+            case C_ULE_D:
+              set_fcsr_bit(fcsr_cc, (fs <= ft) || (isnan(fs) || isnan(ft)));
+              break;
+            case CVT_W_D:   // Convert double to word.
+              // Rounding modes are not yet supported.
+              ASSERT((FCSR_ & 3) == 0);
+              // In rounding mode 0 it should behave like ROUND.
+            case ROUND_W_D:  // Round double to word.
+              {
+                double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
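+                // Note: rounding ties away from zero here differs from
+                // IEEE round-to-nearest-even, the hardware default mode.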
+                int32_t result = static_cast<int32_t>(rounded);
+                set_fpu_register(fd_reg, result);
+                if (set_fcsr_round_error(fs, rounded)) {
+                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                }
+              }
+              break;
+            case TRUNC_W_D:  // Truncate double to word (round towards 0).
+              {
+                int32_t result = static_cast<int32_t>(fs);
+                set_fpu_register(fd_reg, result);
+                if (set_fcsr_round_error(fs, static_cast<double>(result))) {
+                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                }
+              }
+              break;
+            case FLOOR_W_D:  // Round double to word towards negative infinity.
+              {
+                double rounded = floor(fs);
+                int32_t result = static_cast<int32_t>(rounded);
+                set_fpu_register(fd_reg, result);
+                if (set_fcsr_round_error(fs, rounded)) {
+                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                }
+              }
+              break;
+            case CEIL_W_D:  // Round double to word towards positive infinity.
+              {
+                double rounded = ceil(fs);
+                int32_t result = static_cast<int32_t>(rounded);
+                set_fpu_register(fd_reg, result);
+                if (set_fcsr_round_error(fs, rounded)) {
+                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                }
+              }
+              break;
+            case CVT_S_D:  // Convert double to float (single).
+              set_fpu_register_float(fd_reg, static_cast<float>(fs));
+              break;
+            case CVT_L_D:  // Mips32r2: Truncate double to 64-bit long-word.
+              i64 = static_cast<int64_t>(fs);
+              set_fpu_register(fd_reg, i64 & 0xffffffff);
+              set_fpu_register(fd_reg + 1, i64 >> 32);
+              break;
+            case TRUNC_L_D:  // Mips32r2 instruction.
+              i64 = static_cast<int64_t>(fs);
+              set_fpu_register(fd_reg, i64 & 0xffffffff);
+              set_fpu_register(fd_reg + 1, i64 >> 32);
+              break;
+            case ROUND_L_D: {  // Mips32r2 instruction.
+              double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+              i64 = static_cast<int64_t>(rounded);
+              set_fpu_register(fd_reg, i64 & 0xffffffff);
+              set_fpu_register(fd_reg + 1, i64 >> 32);
+              break;
+            }
+            case FLOOR_L_D:  // Mips32r2 instruction.
+              i64 = static_cast<int64_t>(floor(fs));
+              set_fpu_register(fd_reg, i64 & 0xffffffff);
+              set_fpu_register(fd_reg + 1, i64 >> 32);
+              break;
+            case CEIL_L_D:  // Mips32r2 instruction.
+              i64 = static_cast<int64_t>(ceil(fs));
+              set_fpu_register(fd_reg, i64 & 0xffffffff);
+              set_fpu_register(fd_reg + 1, i64 >> 32);
+              break;
+            case C_F_D:
               UNIMPLEMENTED_MIPS();
               break;
             default:
@@ -1091,11 +1754,13 @@
           break;
         case W:
           switch (instr->FunctionFieldRaw()) {
-            case CVT_S_W:
-              UNIMPLEMENTED_MIPS();
+            case CVT_S_W:   // Convert word to float (single).
+              alu_out = get_fpu_register(fs_reg);
+              set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
               break;
             case CVT_D_W:   // Convert word to double.
-              set_fpu_register(rd_reg, static_cast<double>(rs));
+              alu_out = get_fpu_register(fs_reg);
+              set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
               break;
             default:
               UNREACHABLE();
@@ -1103,8 +1768,14 @@
           break;
         case L:
           switch (instr->FunctionFieldRaw()) {
+            case CVT_D_L:  // Mips32r2 instruction.
+              // Watch the signs here: we combine two 32-bit values to
+              // make a signed 64-bit value.
+              i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
+              i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
+              set_fpu_register_double(fd_reg, static_cast<double>(i64));
+              break;
             case CVT_S_L:
-            case CVT_D_L:
               UNIMPLEMENTED_MIPS();
               break;
             default:
@@ -1121,7 +1792,7 @@
       switch (instr->FunctionFieldRaw()) {
         case JR: {
           Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
-              current_pc+Instruction::kInstructionSize);
+              current_pc+Instruction::kInstrSize);
           BranchDelayInstructionDecode(branch_delay_instr);
           set_pc(next_pc);
           pc_modified_ = true;
@@ -1129,16 +1800,21 @@
         }
         case JALR: {
           Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
-              current_pc+Instruction::kInstructionSize);
+              current_pc+Instruction::kInstrSize);
           BranchDelayInstructionDecode(branch_delay_instr);
-          set_register(31, current_pc + 2* Instruction::kInstructionSize);
+          set_register(31, current_pc + 2* Instruction::kInstrSize);
           set_pc(next_pc);
           pc_modified_ = true;
           break;
         }
         // Instructions using HI and LO registers.
         case MULT:
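+          // The 64-bit product computed in the configuration step is
+          // split across the HI/LO register pair.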
+          set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+          set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+          break;
         case MULTU:
+          set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+          set_register(HI, static_cast<int32_t>(u64hilo >> 32));
           break;
         case DIV:
           // Divide by zero was checked in the configuration step.
@@ -1149,7 +1825,7 @@
           set_register(LO, rs_u / rt_u);
           set_register(HI, rs_u % rt_u);
           break;
-        // Break and trap instructions
+        // Break and trap instructions.
         case BREAK:
         case TGE:
         case TGEU:
@@ -1161,6 +1837,23 @@
             SoftwareInterrupt(instr);
           }
           break;
+        // Conditional moves.
+        case MOVN:
+          if (rt) set_register(rd_reg, rs);
+          break;
+        case MOVCI: {
+          uint32_t cc = instr->FCccValue();
+          uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
+          if (instr->Bit(16)) {  // Read Tf bit
+            if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+          } else {
+            if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+          }
+          break;
+        }
+        case MOVZ:
+          if (!rt) set_register(rd_reg, rs);
+          break;
         default:  // For other special opcodes we do the default operation.
           set_register(rd_reg, alu_out);
       };
@@ -1173,9 +1866,23 @@
           set_register(LO, Unpredictable);
           set_register(HI, Unpredictable);
           break;
+        default:  // For other special2 opcodes we do the default operation.
+          set_register(rd_reg, alu_out);
+      }
+      break;
+    case SPECIAL3:
+      switch (instr->FunctionFieldRaw()) {
+        case INS:
+          // Ins instr leaves result in Rt, rather than Rd.
+          set_register(rt_reg, alu_out);
+          break;
+        case EXT:
+          // Ext instr leaves result in Rt, rather than Rd.
+          set_register(rt_reg, alu_out);
+          break;
         default:
           UNREACHABLE();
-      }
+      };
       break;
     // Unimplemented opcodes raised an error in the configuration step before,
     // so we can use the default here to set the destination register in common
@@ -1185,22 +1892,22 @@
   };
 }
 
+
 // Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
 void Simulator::DecodeTypeImmediate(Instruction* instr) {
-  // Instruction fields
+  // Instruction fields.
   Opcode   op     = instr->OpcodeFieldRaw();
-  int32_t  rs     = get_register(instr->RsField());
+  int32_t  rs     = get_register(instr->RsValue());
   uint32_t rs_u   = static_cast<uint32_t>(rs);
-  int32_t  rt_reg = instr->RtField();  // destination register
+  int32_t  rt_reg = instr->RtValue();  // Destination register.
   int32_t  rt     = get_register(rt_reg);
-  int16_t  imm16  = instr->Imm16Field();
+  int16_t  imm16  = instr->Imm16Value();
 
-  int32_t  ft_reg = instr->FtField();  // destination register
-  int32_t  ft     = get_register(ft_reg);
+  int32_t  ft_reg = instr->FtValue();  // Destination register.
 
-  // zero extended immediate
+  // Zero extended immediate.
   uint32_t  oe_imm16 = 0xffff & imm16;
-  // sign extended immediate
+  // Sign extended immediate.
   int32_t   se_imm16 = imm16;
 
   // Get current pc.
@@ -1208,25 +1915,38 @@
   // Next pc.
   int32_t next_pc = bad_ra;
 
-  // Used for conditional branch instructions
+  // Used for conditional branch instructions.
   bool do_branch = false;
   bool execute_branch_delay_instruction = false;
 
-  // Used for arithmetic instructions
+  // Used for arithmetic instructions.
   int32_t alu_out = 0;
-  // Floating point
+  // Floating point.
   double fp_out = 0.0;
+  uint32_t cc, cc_value, fcsr_cc;
 
-  // Used for memory instructions
+  // Used for memory instructions.
   int32_t addr = 0x0;
+  // Value to be written in memory.
+  uint32_t mem_value = 0x0;
 
   // ---------- Configuration (and execution for REGIMM)
   switch (op) {
-    // ------------- COP1. Coprocessor instructions
+    // ------------- COP1. Coprocessor instructions.
     case COP1:
       switch (instr->RsFieldRaw()) {
-        case BC1:   // branch on coprocessor condition
-          UNIMPLEMENTED_MIPS();
+        case BC1:   // Branch on coprocessor condition.
+          cc = instr->FBccValue();
+          fcsr_cc = get_fcsr_condition_bit(cc);
+          cc_value = test_fcsr_bit(fcsr_cc);
+          do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+          execute_branch_delay_instruction = true;
+          // Set next_pc
+          if (do_branch) {
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+          } else {
+            next_pc = current_pc + kBranchReturnOffset;
+          }
           break;
         default:
           UNREACHABLE();
@@ -1259,7 +1979,7 @@
           execute_branch_delay_instruction = true;
           // Set next_pc
           if (do_branch) {
-            next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
             if (instr->IsLinkingInstruction()) {
               set_register(31, current_pc + kBranchReturnOffset);
             }
@@ -1323,6 +2043,21 @@
       addr = rs + se_imm16;
       alu_out = ReadB(addr);
       break;
+    case LH:
+      addr = rs + se_imm16;
+      alu_out = ReadH(addr, instr);
+      break;
+    case LWL: {
+      // al_offset is the offset of the effective address within an
+      // aligned word.
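+      // E.g. al_offset == 1: the aligned word is shifted left 16 bits
+      // and merged with the low 16 bits of rt.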
+      uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+      uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+      uint32_t mask = (1 << byte_shift * 8) - 1;
+      addr = rs + se_imm16 - al_offset;
+      alu_out = ReadW(addr, instr);
+      alu_out <<= byte_shift * 8;
+      alu_out |= rt & mask;
+      break;
+    }
     case LW:
       addr = rs + se_imm16;
       alu_out = ReadW(addr, instr);
@@ -1331,12 +2066,47 @@
       addr = rs + se_imm16;
       alu_out = ReadBU(addr);
       break;
+    case LHU:
+      addr = rs + se_imm16;
+      alu_out = ReadHU(addr, instr);
+      break;
+    case LWR: {
+      // al_offset is an offset of the effective address within an aligned word
+      uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+      uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+      uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+      addr = rs + se_imm16 - al_offset;
+      alu_out = ReadW(addr, instr);
+      alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
+      alu_out |= rt & mask;
+      break;
+    }
     case SB:
       addr = rs + se_imm16;
       break;
+    case SH:
+      addr = rs + se_imm16;
+      break;
+    case SWL: {
+      uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+      uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+      uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+      addr = rs + se_imm16 - al_offset;
+      mem_value = ReadW(addr, instr) & mask;
+      mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+      break;
+    }
     case SW:
       addr = rs + se_imm16;
       break;
+    case SWR: {
+      uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+      uint32_t mask = (1 << al_offset * 8) - 1;
+      addr = rs + se_imm16 - al_offset;
+      mem_value = ReadW(addr, instr);
+      mem_value = (rt << al_offset * 8) | (mem_value & mask);
+      break;
+    }
     case LWC1:
       addr = rs + se_imm16;
       alu_out = ReadW(addr, instr);
@@ -1367,12 +2137,12 @@
       execute_branch_delay_instruction = true;
       // Set next_pc
       if (do_branch) {
-        next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+        next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
         if (instr->IsLinkingInstruction()) {
-          set_register(31, current_pc + 2* Instruction::kInstructionSize);
+          set_register(31, current_pc + 2* Instruction::kInstrSize);
         }
       } else {
-        next_pc = current_pc + 2 * Instruction::kInstructionSize;
+        next_pc = current_pc + 2 * Instruction::kInstrSize;
       }
       break;
     // ------------- Arithmetic instructions
@@ -1388,16 +2158,29 @@
       break;
     // ------------- Memory instructions
     case LB:
+    case LH:
+    case LWL:
     case LW:
     case LBU:
+    case LHU:
+    case LWR:
       set_register(rt_reg, alu_out);
       break;
     case SB:
       WriteB(addr, static_cast<int8_t>(rt));
       break;
+    case SH:
+      WriteH(addr, static_cast<uint16_t>(rt), instr);
+      break;
+    case SWL:
+      WriteW(addr, mem_value, instr);
+      break;
     case SW:
       WriteW(addr, rt, instr);
       break;
+    case SWR:
+      WriteW(addr, mem_value, instr);
+      break;
     case LWC1:
       set_fpu_register(ft_reg, alu_out);
       break;
@@ -1410,7 +2193,7 @@
       break;
     case SDC1:
       addr = rs + se_imm16;
-      WriteD(addr, ft, instr);
+      WriteD(addr, get_fpu_register_double(ft_reg), instr);
       break;
     default:
       break;
@@ -1422,7 +2205,7 @@
     // We don't check for end_sim_pc. First it should not be met as the current
     // pc is valid. Secondly a jump should always execute its branch delay slot.
     Instruction* branch_delay_instr =
-      reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+      reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
     BranchDelayInstructionDecode(branch_delay_instr);
   }
 
@@ -1432,6 +2215,7 @@
   }
 }
 
+
 // Type 3: instructions using a 26-bit immediate (e.g. j, jal).
 void Simulator::DecodeTypeJump(Instruction* instr) {
   // Get current pc.
@@ -1439,35 +2223,39 @@
   // Get unchanged bits of pc.
   int32_t pc_high_bits = current_pc & 0xf0000000;
   // Next pc
-  int32_t next_pc = pc_high_bits | (instr->Imm26Field() << 2);
+  int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
 
   // Execute branch delay slot
   // We don't check for end_sim_pc. First it should not be met as the current pc
   // is valid. Secondly a jump should always execute its branch delay slot.
   Instruction* branch_delay_instr =
-    reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+    reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
   BranchDelayInstructionDecode(branch_delay_instr);
 
   // Update pc and ra if necessary.
   // Do this after the branch delay execution.
   if (instr->IsLinkingInstruction()) {
-    set_register(31, current_pc + 2* Instruction::kInstructionSize);
+    set_register(31, current_pc + 2* Instruction::kInstrSize);
   }
   set_pc(next_pc);
   pc_modified_ = true;
 }
 
+
 // Executes the current instruction.
 void Simulator::InstructionDecode(Instruction* instr) {
+  if (v8::internal::FLAG_check_icache) {
+    CheckICache(isolate_->simulator_i_cache(), instr);
+  }
   pc_modified_ = false;
   if (::v8::internal::FLAG_trace_sim) {
     disasm::NameConverter converter;
     disasm::Disassembler dasm(converter);
     // use a reasonably large buffer
     v8::internal::EmbeddedVector<char, 256> buffer;
-    dasm.InstructionDecode(buffer,
-                           reinterpret_cast<byte_*>(instr));
-    PrintF("  0x%08x  %s\n", instr, buffer.start());
+    dasm.InstructionDecode(buffer, reinterpret_cast<byte_*>(instr));
+    PrintF("  0x%08x  %s\n", reinterpret_cast<intptr_t>(instr),
+           buffer.start());
   }
 
   switch (instr->InstructionType()) {
@@ -1485,7 +2273,7 @@
   }
   if (!pc_modified_) {
     set_register(pc, reinterpret_cast<int32_t>(instr) +
-                 Instruction::kInstructionSize);
+                 Instruction::kInstrSize);
   }
 }
 
@@ -1511,7 +2299,7 @@
       Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
       icount_++;
       if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
-        Debugger dbg(this);
+        MipsDebugger dbg(this);
         dbg.Debug();
       } else {
         InstructionDecode(instr);
@@ -1538,7 +2326,7 @@
   int original_stack = get_register(sp);
   // Compute position of stack on entry to generated code.
   int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
-                                    - kArgsSlotsSize);
+                                    - kCArgsSlotsSize);
   if (OS::ActivationFrameAlignment() != 0) {
     entry_stack &= -OS::ActivationFrameAlignment();
   }
@@ -1643,8 +2431,8 @@
 
 #undef UNSUPPORTED
 
-} }  // namespace assembler::mips
+} }  // namespace v8::internal
 
-#endif  // !__mips || USE_SIMULATOR
+#endif  // USE_SIMULATOR
 
 #endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 6e42683..0cd9bbe 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -37,12 +37,31 @@
 #define V8_MIPS_SIMULATOR_MIPS_H_
 
 #include "allocation.h"
+#include "constants-mips.h"
 
-#if defined(__mips) && !defined(USE_SIMULATOR)
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native mips platform.
+
+namespace v8 {
+namespace internal {
 
 // When running without a simulator we call the entry directly.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-  entry(p0, p1, p2, p3, p4);
+  entry(p0, p1, p2, p3, p4)
+
+typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
+                                  void*, int*, Address, int, Isolate*);
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type mips_regexp_matcher.
+// The fifth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+  (FUNCTION_CAST<mips_regexp_matcher>(entry)(                             \
+      p0, p1, p2, p3, NULL, p4, p5, p6, p7))
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+  reinterpret_cast<TryCatch*>(try_catch_address)
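A hypothetical call site for the regexp macro above (all argument names are
illustrative, not part of this patch). The NULL injected as the fifth C
argument only reserves the slot that the return address occupies in the
ExitFrame of a native call:

// Argument types follow the mips_regexp_matcher typedef.
int result = CALL_GENERATED_REGEXP_CODE(code_entry,        // Address
                                        subject,           // String*
                                        start_offset,      // int
                                        input_start,       // const byte*
                                        input_end,         // const byte*
                                        output_registers,  // int*
                                        stack_base,        // Address
                                        direct_call,       // int
                                        isolate);          // Isolate*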
 
 // The stack limit beyond which we will throw stack overflow errors in
 // generated code. Because generated code on mips uses the C stack, we
@@ -60,6 +79,8 @@
   static inline void UnregisterCTryCatch() { }
 };
 
+} }  // namespace v8::internal
+
 // Calculates the stack limit beyond which we will throw stack overflow errors.
 // This macro must be called from a C++ method. It relies on being able to take
 // the address of "this" to get a value on the current execution stack and then
@@ -70,39 +91,50 @@
   (reinterpret_cast<uintptr_t>(this) >= limit ? \
       reinterpret_cast<uintptr_t>(this) - limit : 0)
 
-// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
-  entry(p0, p1, p2, p3, p4, p5, p6)
+#else  // !defined(USE_SIMULATOR)
+// Running with a simulator.
 
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
-  reinterpret_cast<TryCatch*>(try_catch_address)
+#include "hashmap.h"
 
+namespace v8 {
+namespace internal {
 
-#else  // #if !defined(__mips) || defined(USE_SIMULATOR)
+// -----------------------------------------------------------------------------
+// Utility functions
 
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-  reinterpret_cast<Object*>(\
-      assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
-                                                  p0, p1, p2, p3, p4))
+class CachePage {
+ public:
+  static const int LINE_VALID = 0;
+  static const int LINE_INVALID = 1;
 
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
-  assembler::mips::Simulator::current()->Call(\
-    FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+  static const int kPageShift = 12;
+  static const int kPageSize = 1 << kPageShift;
+  static const int kPageMask = kPageSize - 1;
+  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
+  static const int kLineLength = 1 << kLineShift;
+  static const int kLineMask = kLineLength - 1;
 
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
-  try_catch_address == NULL ? \
-      NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+  CachePage() {
+    memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+  }
 
+  char* ValidityByte(int offset) {
+    return &validity_map_[offset >> kLineShift];
+  }
 
-namespace assembler {
-namespace mips {
+  char* CachedData(int offset) {
+    return &data_[offset];
+  }
+
+ private:
+  char data_[kPageSize];   // The cached data.
+  static const int kValidityMapSize = kPageSize >> kLineShift;
+  char validity_map_[kValidityMapSize];  // One byte per line.
+};
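A small standalone sketch of the page/line arithmetic encoded by the
constants above (mirrors kPageShift/kLineShift, but is not simulator code):

enum { kPageShift = 12, kPageMask = (1 << kPageShift) - 1, kLineShift = 2 };

// Page base = addr & ~kPageMask; the validity byte guarding the 4-byte line
// that contains 'addr' sits at offset (addr & kPageMask) >> kLineShift.
int ValidityIndex(unsigned addr) {
  return (addr & kPageMask) >> kLineShift;
}
// e.g. addr 0x1234 -> page 0x1000, in-page offset 0x234, validity byte 0x8d.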
 
 class Simulator {
  public:
-  friend class Debugger;
+  friend class MipsDebugger;
 
   // Registers are declared in order. See SMRL chapter 2.
   enum Register {
@@ -143,7 +175,7 @@
 
   // The currently executing Simulator instance. Potentially there can be one
   // for each native thread.
-  static Simulator* current();
+  static Simulator* current(v8::internal::Isolate* isolate);
 
   // Accessors for register state. Reading the pc value adheres to the MIPS
   // architecture specification and is off by 8 from the currently executing
@@ -152,9 +184,15 @@
   int32_t get_register(int reg) const;
   // Same for FPURegisters
   void set_fpu_register(int fpureg, int32_t value);
+  void set_fpu_register_float(int fpureg, float value);
   void set_fpu_register_double(int fpureg, double value);
   int32_t get_fpu_register(int fpureg) const;
+  int64_t get_fpu_register_long(int fpureg) const;
+  float get_fpu_register_float(int fpureg) const;
   double get_fpu_register_double(int fpureg) const;
+  void set_fcsr_bit(uint32_t cc, bool value);
+  bool test_fcsr_bit(uint32_t cc);
+  bool set_fcsr_round_error(double original, double rounded);
 
   // Special case of set_register and get_register to access the raw PC value.
   void set_pc(int32_t value);
@@ -172,7 +210,7 @@
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
   // which sets up the simulator state and grabs the result on return.
-  int32_t Call(byte_* entry, int argument_count, ...);
+  int32_t Call(byte* entry, int argument_count, ...);
 
   // Push an address onto the JS stack.
   uintptr_t PushAddress(uintptr_t address);
@@ -180,6 +218,14 @@
   // Pop an address from the JS stack.
   uintptr_t PopAddress();
 
+  // ICache checking.
+  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+                          size_t size);
+
+  // Returns true if pc register contains one of the 'special_values' defined
+  // below (bad_ra, end_sim_pc).
+  bool has_bad_pc() const;
+
  private:
   enum special_values {
     // Known bad pc value to ensure that the simulator does not execute
@@ -223,9 +269,17 @@
   inline int32_t SetDoubleHIW(double* addr);
   inline int32_t SetDoubleLOW(double* addr);
 
-
   // Executing is handled based on the instruction type.
   void DecodeTypeRegister(Instruction* instr);
+
+  // Helper function for DecodeTypeRegister.
+  void ConfigureTypeRegister(Instruction* instr,
+                             int32_t& alu_out,
+                             int64_t& i64hilo,
+                             uint64_t& u64hilo,
+                             int32_t& next_pc,
+                             bool& do_interrupt);
+
   void DecodeTypeImmediate(Instruction* instr);
   void DecodeTypeJump(Instruction* instr);
 
@@ -239,11 +293,18 @@
     if (instr->IsForbiddenInBranchDelay()) {
       V8_Fatal(__FILE__, __LINE__,
                "Eror:Unexpected %i opcode in a branch delay slot.",
-               instr->OpcodeField());
+               instr->OpcodeValue());
     }
     InstructionDecode(instr);
   }
 
+  // ICache.
+  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+                           int size);
+  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
+
   enum Exception {
     none,
     kIntegerOverflow,
@@ -258,7 +319,7 @@
 
   // Runtime call support.
   static void* RedirectExternalReference(void* external_function,
-                                         bool fp_return);
+                                         ExternalReference::Type type);
 
   // Used for real-time calls that take two double values as arguments and
   // returns a double.
@@ -269,19 +330,40 @@
   int32_t registers_[kNumSimuRegisters];
   // Coprocessor Registers.
   int32_t FPUregisters_[kNumFPURegisters];
+  // FPU control register.
+  uint32_t FCSR_;
 
   // Simulator support.
   char* stack_;
+  size_t stack_size_;
   bool pc_modified_;
   int icount_;
-  static bool initialized_;
+  int break_count_;
+
+  // ICache simulation.
+  v8::internal::HashMap* i_cache_;
 
   // Registered breakpoints.
   Instruction* break_pc_;
   Instr break_instr_;
+
+  v8::internal::Isolate* isolate_;
 };
 
-} }   // namespace assembler::mips
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+      FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+  Simulator::current(Isolate::Current())->Call( \
+      entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+  try_catch_address == NULL ? \
+      NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
 
 
 // The simulator has its own stack. Thus it has a different stack limit from
@@ -292,20 +374,21 @@
 class SimulatorStack : public v8::internal::AllStatic {
  public:
   static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
-    return assembler::mips::Simulator::current()->StackLimit();
+    return Simulator::current(Isolate::Current())->StackLimit();
   }
 
   static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
-    assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
+    Simulator* sim = Simulator::current(Isolate::Current());
     return sim->PushAddress(try_catch_address);
   }
 
   static inline void UnregisterCTryCatch() {
-    assembler::mips::Simulator::current()->PopAddress();
+    Simulator::current(Isolate::Current())->PopAddress();
   }
 };
 
-#endif  // !defined(__mips) || defined(USE_SIMULATOR)
+} }  // namespace v8::internal
 
+#endif  // !defined(USE_SIMULATOR)
 #endif  // V8_MIPS_SIMULATOR_MIPS_H_
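Both halves of this header now define the same entry macros, selected purely
by USE_SIMULATOR, so a call site (names hypothetical) reads identically
either way:

// Under USE_SIMULATOR this trampolines through
// Simulator::current(Isolate::Current())->Call(...); natively it is a plain
// indirect call through 'entry'.
Object* result = CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4);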
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 683b862..1a49558 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -57,6 +57,12 @@
 }
 
 
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 // Load a fast property out of a holder object (src). In-object properties
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
@@ -75,6 +81,20 @@
 }
 
 
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object the
+// execution continues at the miss label. The register containing the
+// receiver is potentially clobbered.
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Label* miss,
+                                            bool support_wrappers) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                  Register receiver,
                                                  Register scratch1,
@@ -84,7 +104,7 @@
 }
 
 
-// Generate StoreField code, value is passed in r0 register.
+// Generate StoreField code. The value is passed in the a0 register.
 // After executing generated code, the receiver_reg and name_reg
 // may be clobbered.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -104,15 +124,94 @@
 }
 
 
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+  CallInterceptorCompiler(StubCompiler* stub_compiler,
+                          const ParameterCount& arguments,
+                          Register name)
+      : stub_compiler_(stub_compiler),
+        arguments_(arguments),
+        name_(name) {}
+
+  void Compile(MacroAssembler* masm,
+               JSObject* object,
+               JSObject* holder,
+               String* name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+ private:
+  void CompileCacheable(MacroAssembler* masm,
+                       JSObject* object,
+                       Register receiver,
+                       Register scratch1,
+                       Register scratch2,
+                       Register scratch3,
+                       JSObject* interceptor_holder,
+                       LookupResult* lookup,
+                       String* name,
+                       const CallOptimization& optimization,
+                       Label* miss_label) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  void CompileRegular(MacroAssembler* masm,
+                      JSObject* object,
+                      Register receiver,
+                      Register scratch1,
+                      Register scratch2,
+                      Register scratch3,
+                      String* name,
+                      JSObject* interceptor_holder,
+                      Label* miss_label) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  void LoadWithInterceptor(MacroAssembler* masm,
+                           Register receiver,
+                           Register holder,
+                           JSObject* holder_obj,
+                           Register scratch,
+                           Label* interceptor_succeeded) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  StubCompiler* stub_compiler_;
+  const ParameterCount& arguments_;
+  Register name_;
+};
+
+
 #undef __
 #define __ ACCESS_MASM(masm())
 
 
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch1,
+                                       Register scratch2,
+                                       String* name,
+                                       int save_at_depth,
+                                       Label* miss) {
+  UNIMPLEMENTED_MIPS();
+  return no_reg;
+}
+
+
 void StubCompiler::GenerateLoadField(JSObject* object,
                                      JSObject* holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
+                                     Register scratch3,
                                      int index,
                                      String* name,
                                      Label* miss) {
@@ -125,6 +224,7 @@
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
+                                        Register scratch3,
                                         Object* value,
                                         String* name,
                                         Label* miss) {
@@ -132,282 +232,365 @@
 }
 
 
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
-                                        JSObject* holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        AccessorInfo* callback,
-                                        String* name,
-                                        Label* miss,
-                                        Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                JSObject* holder,
+                                                Register receiver,
+                                                Register name_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                AccessorInfo* callback,
+                                                String* name,
+                                                Label* miss) {
   UNIMPLEMENTED_MIPS();
-  __ break_(0x470);
-  return false;   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
 void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* holder,
+                                           JSObject* interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
+                                           Register scratch3,
                                            String* name,
                                            Label* miss) {
   UNIMPLEMENTED_MIPS();
-  __ break_(0x505);
 }
 
 
-Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
-  // Registers:
-  // a1: function
-  // ra: return address
-
-  // Enter an internal frame.
-  __ EnterInternalFrame();
-  // Preserve the function.
-  __ Push(a1);
-  // Setup aligned call.
-  __ SetupAlignedCall(t0, 1);
-  // Push the function on the stack as the argument to the runtime function.
-  __ Push(a1);
-  // Call the runtime function
-  __ CallRuntime(Runtime::kLazyCompile, 1);
-  __ ReturnFromAlignedCall();
-  // Calculate the entry point.
-  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
-  // Restore saved function.
-  __ Pop(a1);
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
-  // Do a tail-call of the compiled function.
-  __ Jump(t9);
-
-  return GetCodeWithFlags(flags, "LazyCompileStub");
-}
-
-
-Object* CallStubCompiler::CompileCallField(JSObject* object,
-                                           JSObject* holder,
-                                           int index,
-                                           String* name) {
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
 }
 
 
-Object* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                               JSObject* holder,
-                                               JSFunction* function,
-                                               String* name,
-                                               CheckType check) {
+void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
+                                                   JSObject* holder,
+                                                   String* name,
+                                                   Label* miss) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
 }
 
 
-Object* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                              JSObject* holder,
-                                              JSFunction* function,
-                                              String* name,
-                                              CheckType check) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    Label* miss) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
 }
 
 
-Object* CallStubCompiler::CompileCallConstant(Object* object,
-                                              JSObject* holder,
-                                              JSFunction* function,
-                                              String* name,
-                                              CheckType check) {
+MaybeObject* CallStubCompiler::GenerateMissBranch() {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                 JSObject* holder,
-                                                 String* name) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x782);
-  return GetCode(INTERCEPTOR, name);
-}
-
-
-Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                            GlobalObject* holder,
-                                            JSGlobalPropertyCell* cell,
-                                            JSFunction* function,
-                                            String* name) {
-  UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
-}
-
-
-Object* StoreStubCompiler::CompileStoreField(JSObject* object,
-                                             int index,
-                                             Map* transition,
-                                             String* name) {
-  UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
-}
-
-
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
-                                                AccessorInfo* callback,
+MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
+                                                JSObject* holder,
+                                                int index,
                                                 String* name) {
   UNIMPLEMENTED_MIPS();
-  __ break_(0x906);
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                                   JSObject* holder,
+                                                   JSGlobalPropertyCell* cell,
+                                                   JSFunction* function,
                                                    String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
-                                              JSGlobalPropertyCell* cell,
-                                              String* name) {
+MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* LoadStubCompiler::CompileLoadField(JSObject* object,
-                                           JSObject* holder,
-                                           int index,
-                                           String* name) {
+MaybeObject* CallStubCompiler::CompileStringCharAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* LoadStubCompiler::CompileLoadCallback(String* name,
-                                              JSObject* object,
-                                              JSObject* holder,
-                                              AccessorInfo* callback) {
+MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
-                                              JSObject* holder,
-                                              Object* value,
-                                              String* name) {
+MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
-                                                 JSObject* holder,
-                                                 String* name) {
+MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
+                                                  JSObject* holder,
+                                                  JSGlobalPropertyCell* cell,
+                                                  JSFunction* function,
+                                                  String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
-                                            GlobalObject* holder,
-                                            JSGlobalPropertyCell* cell,
-                                            String* name,
-                                            bool is_dont_delete) {
+MaybeObject* CallStubCompiler::CompileFastApiCall(
+    const CallOptimization& optimization,
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                JSObject* receiver,
-                                                JSObject* holder,
-                                                int index) {
-  UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
-}
-
-
-Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* receiver,
+MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
                                                    JSObject* holder,
-                                                   AccessorInfo* callback) {
+                                                   JSFunction* function,
+                                                   String* name,
+                                                   CheckType check) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                   JSObject* receiver,
-                                                   JSObject* holder,
-                                                   Object* value) {
-  UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
-}
-
-
-Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                       JSObject* holder,
                                                       String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 JSFunction* function,
+                                                 String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
-}
-
-
-// TODO(1224671): implement the fast case.
-Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
-}
-
-
-Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
                                                   int index,
                                                   Map* transition,
                                                   String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* ConstructStubCompiler::CompileConstructStub(
-    SharedFunctionInfo* shared) {
+MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                     AccessorInfo* callback,
+                                                     String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    ExternalArrayType array_type, Code::Flags flags) {
+MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                        String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
 }
 
 
-Object* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    ExternalArrayType array_type, Code::Flags flags) {
+MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                                   JSGlobalPropertyCell* cell,
+                                                   String* name) {
   UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
+                                                      JSObject* object,
+                                                      JSObject* last) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                                JSObject* holder,
+                                                int index,
+                                                String* name) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                                   JSObject* holder,
+                                                   Object* value,
+                                                   String* name) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 String* name,
+                                                 bool is_dont_delete) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                     JSObject* receiver,
+                                                     JSObject* holder,
+                                                     int index) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
+    String* name,
+    JSObject* receiver,
+    JSObject* holder,
+    AccessorInfo* callback) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                        JSObject* receiver,
+                                                        JSObject* holder,
+                                                        Object* value) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                           JSObject* holder,
+                                                           String* name) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+                                                       int index,
+                                                       Map* transition,
+                                                       String* name) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
+    JSObject* receiver) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+    JSObject* receiver_object,
+    ExternalArrayType array_type,
+    Code::Flags flags) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
+}
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+    JSObject* receiver_object,
+    ExternalArrayType array_type,
+    Code::Flags flags) {
+  UNIMPLEMENTED_MIPS();
+  return NULL;
 }
 
 
diff --git a/src/mips/fast-codegen-mips.cc b/src/mips/virtual-frame-mips-inl.h
similarity index 65%
rename from src/mips/fast-codegen-mips.cc
rename to src/mips/virtual-frame-mips-inl.h
index 186f9fa..f0d2fab 100644
--- a/src/mips/fast-codegen-mips.cc
+++ b/src/mips/virtual-frame-mips-inl.h
@@ -25,53 +25,34 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#ifndef V8_VIRTUAL_FRAME_MIPS_INL_H_
+#define V8_VIRTUAL_FRAME_MIPS_INL_H_
 
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "fast-codegen.h"
+#include "assembler-mips.h"
+#include "virtual-frame-mips.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
 
-Register FastCodeGenerator::accumulator0() { return no_reg; }
-Register FastCodeGenerator::accumulator1() { return no_reg; }
-Register FastCodeGenerator::scratch0() { return no_reg; }
-Register FastCodeGenerator::scratch1() { return no_reg; }
-Register FastCodeGenerator::receiver_reg() { return no_reg; }
-Register FastCodeGenerator::context_reg() { return no_reg; }
-
-
-void FastCodeGenerator::Generate(CompilationInfo* info) {
+MemOperand VirtualFrame::ParameterAt(int index) {
   UNIMPLEMENTED_MIPS();
+  return MemOperand(zero_reg, 0);
 }
 
 
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+// The receiver frame slot.
+MemOperand VirtualFrame::Receiver() {
   UNIMPLEMENTED_MIPS();
+  return MemOperand(zero_reg, 0);
 }
 
 
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
+void VirtualFrame::Forget(int count) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
-  UNIMPLEMENTED_MIPS();
-}
-
-#undef __
-
-
 } }  // namespace v8::internal
 
-#endif  // V8_TARGET_ARCH_MIPS
+#endif  // V8_VIRTUAL_FRAME_MIPS_INL_H_
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
index b61ce75..22fe9f0 100644
--- a/src/mips/virtual-frame-mips.cc
+++ b/src/mips/virtual-frame-mips.cc
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
-
 #include "v8.h"
 
 #if defined(V8_TARGET_ARCH_MIPS)
@@ -39,44 +37,50 @@
 namespace v8 {
 namespace internal {
 
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
 #define __ ACCESS_MASM(masm())
 
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
-  UNREACHABLE();
+void VirtualFrame::PopToA1A0() {
+  UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::SyncElementByPushing(int index) {
-  UNREACHABLE();
+void VirtualFrame::PopToA1() {
+  UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::SyncRange(int begin, int end) {
-  // All elements are in memory on MIPS (ie, synced).
-#ifdef DEBUG
-  for (int i = begin; i <= end; i++) {
-    ASSERT(elements_[i].is_synced());
-  }
-#endif
+void VirtualFrame::PopToA0() {
+  UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
+void VirtualFrame::MergeTo(const VirtualFrame* expected,
+                           Condition cond,
+                           Register r1,
+                           const Operand& r2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected,
+                           Condition cond,
+                           Register r1,
+                           const Operand& r2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::MergeTOSTo(
+    VirtualFrame::TopOfStack expected_top_of_stack_state,
+    Condition cond,
+    Register r1,
+    const Operand& r2) {
   UNIMPLEMENTED_MIPS();
 }
 
 
 void VirtualFrame::Enter() {
-  // TODO(MIPS): Implement DEBUG
-
-  // We are about to push four values to the frame.
-  Adjust(4);
-  __ MultiPush(ra.bit() | fp.bit() | cp.bit() | a1.bit());
-  // Adjust FP to point to saved FP.
-  __ addiu(fp, sp, 2 * kPointerSize);
+  UNIMPLEMENTED_MIPS();
 }
 
 
@@ -86,232 +90,216 @@
 
 
 void VirtualFrame::AllocateStackSlots() {
-  int count = local_count();
-  if (count > 0) {
-    Comment cmnt(masm(), "[ Allocate space for locals");
-    Adjust(count);
-      // Initialize stack slots with 'undefined' value.
-    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-    __ addiu(sp, sp, -count * kPointerSize);
-    for (int i = 0; i < count; i++) {
-      __ sw(t0, MemOperand(sp, (count-i-1)*kPointerSize));
-    }
-  }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::RestoreContextRegister() {
-  UNIMPLEMENTED_MIPS();
-}
-
 
 void VirtualFrame::PushReceiverSlotAddress() {
   UNIMPLEMENTED_MIPS();
 }
 
 
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
-  return kIllegalIndex;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
 void VirtualFrame::PushTryHandler(HandlerType type) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::RawCallStub(CodeStub* stub) {
+void VirtualFrame::CallJSFunction(int arg_count) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(f, arg_count);
-}
-
-
 void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(id, arg_count);
-}
-
-
-void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::CallAlignedRuntime(Runtime::FunctionId id, int arg_count) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void VirtualFrame::DebugBreak() {
   UNIMPLEMENTED_MIPS();
 }
+#endif
 
 
 void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
                                  InvokeJSFlags flags,
-                                 Result* arg_count_register,
                                  int arg_count) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::CallCodeObject(Handle<Code> code,
-                                  RelocInfo::Mode rmode,
-                                  int dropped_args) {
-  switch (code->kind()) {
-    case Code::CALL_IC:
-      break;
-    case Code::FUNCTION:
-      UNIMPLEMENTED_MIPS();
-      break;
-    case Code::KEYED_LOAD_IC:
-      UNIMPLEMENTED_MIPS();
-      break;
-    case Code::LOAD_IC:
-      UNIMPLEMENTED_MIPS();
-      break;
-    case Code::KEYED_STORE_IC:
-      UNIMPLEMENTED_MIPS();
-      break;
-    case Code::STORE_IC:
-      UNIMPLEMENTED_MIPS();
-      break;
-    case Code::BUILTIN:
-      UNIMPLEMENTED_MIPS();
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  Forget(dropped_args);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ Call(code, rmode);
+void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
+  UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::CallCodeObject(Handle<Code> code,
-                                  RelocInfo::Mode rmode,
-                                  Result* arg,
-                                  int dropped_args) {
+void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallKeyedLoadIC() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallKeyedStoreIC() {
   UNIMPLEMENTED_MIPS();
 }
 
 
 void VirtualFrame::CallCodeObject(Handle<Code> code,
                                   RelocInfo::Mode rmode,
-                                  Result* arg0,
-                                  Result* arg1,
-                                  int dropped_args,
-                                  bool set_auto_args_slots) {
+                                  int dropped_args) {
   UNIMPLEMENTED_MIPS();
 }
 
 
+//    NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS.
+const bool VirtualFrame::kA0InUse[TOS_STATES] =
+    { false,            true,   false,  true,      true };
+const bool VirtualFrame::kA1InUse[TOS_STATES] =
+    { false,            false,  true,   true,      true };
+const int VirtualFrame::kVirtualElements[TOS_STATES] =
+    { 0,                1,      1,      2,         2 };
+const Register VirtualFrame::kTopRegister[TOS_STATES] =
+    { a0,               a0,     a1,     a1,        a0 };
+const Register VirtualFrame::kBottomRegister[TOS_STATES] =
+    { a0,               a0,     a1,     a0,        a1 };
+const Register VirtualFrame::kAllocatedRegisters[
+    VirtualFrame::kNumberOfAllocatedRegisters] = { a2, a3, t0, t1, t2 };
+// Popping is done by the transition implied by kStateAfterPop.  Of course if
+// there were no stack slots allocated to registers then the physical SP must
+// be adjusted.
+const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
+    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, A0_TOS, A1_TOS };
+// Pushing is done by the transition implied by kStateAfterPush.  Of course if
+// the maximum number of registers was already allocated to the top of stack
+// slots then one register must be physically pushed onto the stack.
+const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
+    { A0_TOS, A1_A0_TOS, A0_A1_TOS, A0_A1_TOS, A1_A0_TOS };
+
+
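The five tables above encode a small state machine over which registers cache
the top stack slots. A standalone sketch of how pushes and pops walk it (enum
order follows the NO_TOS_REGISTERS comment; not simulator code):

enum Tos { kNoTos, kA0Tos, kA1Tos, kA1A0Tos, kA0A1Tos, kTosStates };

const Tos kAfterPop[kTosStates]  = { kNoTos, kNoTos, kNoTos, kA0Tos, kA1Tos };
const Tos kAfterPush[kTosStates] = { kA0Tos, kA1A0Tos, kA0A1Tos, kA0A1Tos,
                                     kA1A0Tos };

// Two pushes from a spilled frame: kNoTos -> kA0Tos -> kA1A0Tos, so both
// registers now cache stack slots. A third push must physically push one
// register first, which is why kAfterPush maps each two-register state to
// its swapped counterpart.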
 void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  ASSERT(height() >= count);
-  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
-  // Emit code to lower the stack pointer if necessary.
-  if (num_virtual_elements < count) {
-    int num_dropped = count - num_virtual_elements;
-    stack_pointer_ -= num_dropped;
-    __ addiu(sp, sp, num_dropped * kPointerSize);
-  }
-
-  // Discard elements from the virtual frame and free any registers.
-  for (int i = 0; i < count; i++) {
-    FrameElement dropped = elements_.RemoveLast();
-    if (dropped.is_register()) {
-      Unuse(dropped.reg());
-    }
-  }
-}
-
-
-void VirtualFrame::DropFromVFrameOnly(int count) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-Result VirtualFrame::Pop() {
+void VirtualFrame::Pop() {
   UNIMPLEMENTED_MIPS();
-  Result res = Result();
-  return res;    // UNIMPLEMENTED RETURN
 }
 
 
 void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ Pop(reg);
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SpillAllButCopyTOSToA0() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SpillAllButCopyTOSToA1() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SpillAllButCopyTOSToA1A0() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Register VirtualFrame::Peek() {
+  UNIMPLEMENTED_MIPS();
+  return no_reg;
+}
+
+
+Register VirtualFrame::Peek2() {
+  UNIMPLEMENTED_MIPS();
+  return no_reg;
+}
+
+
+void VirtualFrame::Dup() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Dup2() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
+  UNIMPLEMENTED_MIPS();
+  return no_reg;
+}
+
+
+void VirtualFrame::EnsureOneFreeTOSRegister() {
+  UNIMPLEMENTED_MIPS();
 }
 
 
 void VirtualFrame::EmitMultiPop(RegList regs) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  for (int16_t i = 0; i < kNumRegisters; i++) {
-    if ((regs & (1 << i)) != 0) {
-      stack_pointer_--;
-      elements_.RemoveLast();
-    }
-  }
-  __ MultiPop(regs);
+  UNIMPLEMENTED_MIPS();
 }
 
 
-void VirtualFrame::EmitPush(Register reg) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
-  stack_pointer_++;
-  __ Push(reg);
+void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Register VirtualFrame::GetTOSRegister() {
+  UNIMPLEMENTED_MIPS();
+  return no_reg;
+}
+
+
+void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
+  UNIMPLEMENTED_MIPS();
 }
 
 
 void VirtualFrame::EmitMultiPush(RegList regs) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  for (int16_t i = kNumRegisters; i > 0; i--) {
-    if ((regs & (1 << i)) != 0) {
-      elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
-      stack_pointer_++;
-    }
-  }
-  __ MultiPush(regs);
-}
-
-
-void VirtualFrame::EmitArgumentSlots(RegList reglist) {
   UNIMPLEMENTED_MIPS();
 }
 
+
+void VirtualFrame::EmitMultiPushReversed(RegList regs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SpillAll() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
index b32e2ae..be8b74e 100644
--- a/src/mips/virtual-frame-mips.h
+++ b/src/mips/virtual-frame-mips.h
@@ -30,11 +30,13 @@
 #define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
 
 #include "register-allocator.h"
-#include "scopes.h"
 
 namespace v8 {
 namespace internal {
 
+// This dummy class is only used to create invalid virtual frames.
+extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
+
 
 // -------------------------------------------------------------------------
 // Virtual frames
@@ -47,14 +49,54 @@
 
 class VirtualFrame : public ZoneObject {
  public:
+  class RegisterAllocationScope;
   // A utility class to introduce a scope where the virtual frame is
   // expected to remain spilled. The constructor spills the code
-  // generator's current frame, but no attempt is made to require it
-  // to stay spilled. It is intended as documentation while the code
-  // generator is being transformed.
+  // generator's current frame, and keeps it spilled.
   class SpilledScope BASE_EMBEDDED {
    public:
+    explicit SpilledScope(VirtualFrame* frame)
+      : old_is_spilled_(
+          Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
+      if (frame != NULL) {
+        if (!old_is_spilled_) {
+          frame->SpillAll();
+        } else {
+          frame->AssertIsSpilled();
+        }
+      }
+      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
+    }
+    ~SpilledScope() {
+      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
+          old_is_spilled_);
+    }
+    static bool is_spilled() {
+      return Isolate::Current()->is_virtual_frame_in_spilled_scope();
+    }
+
+   private:
+    int old_is_spilled_;
+
     SpilledScope() {}
+
+    friend class RegisterAllocationScope;
+  };
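Hypothetical use of the scope above: construction spills the frame (or, when
already inside a spilled scope, merely asserts it) and saves the isolate-wide
flag so scopes nest; destruction restores the flag:

{
  VirtualFrame::SpilledScope spilled(frame);  // frame->SpillAll() if needed.
  // ... emit code that may assume every frame element lives in memory ...
}  // Previous is_virtual_frame_in_spilled_scope value restored here.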
+
+  class RegisterAllocationScope BASE_EMBEDDED {
+   public:
+    // A utility class to introduce a scope where the virtual frame
+    // is not spilled, ie. where register allocation occurs.  Eventually
+    // when RegisterAllocationScope is ubiquitous it can be removed
+    // along with the (by then unused) SpilledScope class.
+    inline explicit RegisterAllocationScope(CodeGenerator* cgen);
+    inline ~RegisterAllocationScope();
+
+   private:
+    CodeGenerator* cgen_;
+    bool old_is_spilled_;
+
+    RegisterAllocationScope() {}
   };
 
   // An illegal index into the virtual frame.
@@ -63,45 +105,49 @@
   // Construct an initial virtual frame on entry to a JS function.
   inline VirtualFrame();
 
+  // Construct an invalid virtual frame, used by JumpTargets.
+  inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
+
   // Construct a virtual frame as a clone of an existing one.
   explicit inline VirtualFrame(VirtualFrame* original);
 
-  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
-  MacroAssembler* masm() { return cgen()->masm(); }
-
-  // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index,
-                             NumberInfo info = NumberInfo::Unknown());
+  inline CodeGenerator* cgen() const;
+  inline MacroAssembler* masm();
 
   // The number of elements on the virtual frame.
-  int element_count() { return elements_.length(); }
+  int element_count() const { return element_count_; }
 
   // The height of the virtual expression stack.
-  int height() {
-    return element_count() - expression_base_index();
-  }
-
-  int register_location(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num];
-  }
-
-  int register_location(Register reg) {
-    return register_locations_[RegisterAllocator::ToNumber(reg)];
-  }
-
-  void set_register_location(Register reg, int index) {
-    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
-  }
+  inline int height() const;
 
   bool is_used(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num] != kIllegalIndex;
-  }
-
-  bool is_used(Register reg) {
-    return register_locations_[RegisterAllocator::ToNumber(reg)]
-        != kIllegalIndex;
+    switch (num) {
+      case 0: {  // a0.
+        return kA0InUse[top_of_stack_state_];
+      }
+      case 1: {  // a1.
+        return kA1InUse[top_of_stack_state_];
+      }
+      case 2:
+      case 3:
+      case 4:
+      case 5:
+      case 6: {  // a2 to a3, t0 to t2.
+        ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
+        ASSERT(num >= kFirstAllocatedRegister);
+        if ((register_allocation_map_ &
+             (1 << (num - kFirstAllocatedRegister))) == 0) {
+          return false;
+        } else {
+          return true;
+        }
+      }
+      default: {
+        ASSERT(num < kFirstAllocatedRegister ||
+               num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
+        return false;
+      }
+    }
   }
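For the a2..t2 cases above the test reduces to a single bit probe of
register_allocation_map_; a sketch, assuming kFirstAllocatedRegister is the
code number of a2 (2):

bool AllocatedRegisterInUse(int num, unsigned register_allocation_map) {
  return ((register_allocation_map >> (num - 2)) & 1) != 0;
}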
 
   // Add extra in-memory elements to the top of the frame to match an actual
@@ -110,53 +156,60 @@
   void Adjust(int count);
 
   // Forget elements from the top of the frame to match an actual frame (eg,
-  // the frame after a runtime call). No code is emitted.
-  void Forget(int count) {
-    ASSERT(count >= 0);
-    ASSERT(stack_pointer_ == element_count() - 1);
-    stack_pointer_ -= count;
-    // On mips, all elements are in memory, so there is no extra bookkeeping
-    // (registers, copies, etc.) beyond dropping the elements.
-    elements_.Rewind(stack_pointer_ + 1);
-  }
+  // the frame after a runtime call). No code is emitted except to bring the
+  // frame to a spilled state.
+  void Forget(int count);
 
-  // Forget count elements from the top of the frame and adjust the stack
-  // pointer downward. This is used, for example, before merging frames at
-  // break, continue, and return targets.
-  void ForgetElements(int count);
 
   // Spill all values from the frame to memory.
   void SpillAll();
 
+  void AssertIsSpilled() const {
+    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
+    ASSERT(register_allocation_map_ == 0);
+  }
+
+  void AssertIsNotSpilled() {
+    ASSERT(!SpilledScope::is_spilled());
+  }
+
   // Spill all occurrences of a specific register from the frame.
   void Spill(Register reg) {
-    if (is_used(reg)) SpillElementAt(register_location(reg));
+    UNIMPLEMENTED();
   }
 
   // Spill all occurrences of an arbitrary register if possible. Return the
   // register spilled or no_reg if it was not possible to free any register
-  // (ie, they all have frame-external references).
+  // (ie, they all have frame-external references). Unimplemented.
   Register SpillAnyRegister();
 
-  // Prepare this virtual frame for merging to an expected frame by
-  // performing some state changes that do not require generating
-  // code. It is guaranteed that no code will be generated.
-  void PrepareMergeTo(VirtualFrame* expected);
-
   // Make this virtual frame have a state identical to an expected virtual
   // frame. As a side effect, code may be emitted to make this frame match
   // the expected one.
-  void MergeTo(VirtualFrame* expected);
+  void MergeTo(const VirtualFrame* expected,
+               Condition cond = al,
+               Register r1 = no_reg,
+               const Operand& r2 = Operand(no_reg));
+
+  void MergeTo(VirtualFrame* expected,
+               Condition cond = al,
+               Register r1 = no_reg,
+               const Operand& r2 = Operand(no_reg));
+
+  // Checks whether this frame can be branched to by the other frame.
+  bool IsCompatibleWith(const VirtualFrame* other) const {
+    return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
+  }
+
+  inline void ForgetTypeInfo() {
+    tos_known_smi_map_ = 0;
+  }
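
IsCompatibleWith() is a subset check: the branch is legal only if every
slot the target frame records as a known smi is also a known smi in the
source frame. A standalone sketch of the check (illustrative only, with
hypothetical map values):

#include <cassert>

// True when a frame with 'target_map' can be branched to from a frame
// with 'source_map'.
bool IsCompatible(unsigned target_map, unsigned source_map) {
  return (target_map & ~source_map) == 0;
}

int main() {
  // Target knows only the TOS (bit 0) is a smi; source knows bits 0 and 1.
  assert(IsCompatible(0x1, 0x3));
  // Source cannot prove bit 1 holds a smi, so this branch is rejected.
  assert(!IsCompatible(0x2, 0x1));
  return 0;
}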
 
   // Detach a frame from its code generator, perhaps temporarily. This
   // tells the register allocator that it is free to use frame-internal
   // registers. Used when the code generator's frame is switched from this
   // one to NULL by an unconditional jump.
   void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Unuse(i);
-    }
   }
 
   // (Re)attach a frame to its code generator. This informs the register
@@ -164,10 +217,6 @@
   // Used when a code generator's frame is switched from NULL to this one by
   // binding a label.
   void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Unuse(i);
-    }
   }
 
   // Emit code for the physical JS entry and exit frame sequences. After
@@ -177,176 +226,142 @@
   void Enter();
   void Exit();
 
-  // Prepare for returning from the frame by spilling locals and
-  // dropping all non-locals elements in the virtual frame. This
-  // avoids generating unnecessary merge code when jumping to the
-  // shared return site. Emits code for spills.
-  void PrepareForReturn();
+  // Prepare for returning from the frame by dropping elements in the
+  // virtual frame. This avoids generating unnecessary merge code when
+  // jumping to the shared return site. No spill code emitted. Value to
+  // return should be in v0.
+  inline void PrepareForReturn();
+
+  // Number of local variables after which we use a loop for allocating.
+  static const int kLocalVarBound = 5;
 
   // Allocate and initialize the frame-allocated locals.
   void AllocateStackSlots();
 
   // The current top of the expression stack as an assembly operand.
-  MemOperand Top() { return MemOperand(sp, 0); }
+  MemOperand Top() {
+    AssertIsSpilled();
+    return MemOperand(sp, 0);
+  }
 
   // An element of the expression stack as an assembly operand.
   MemOperand ElementAt(int index) {
-    return MemOperand(sp, index * kPointerSize);
+    int adjusted_index = index - kVirtualElements[top_of_stack_state_];
+    ASSERT(adjusted_index >= 0);
+    return MemOperand(sp, adjusted_index * kPointerSize);
   }
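
ElementAt() has to skip the elements that currently live in a0/a1 rather
than in memory. A standalone sketch (illustrative; the kVirtualElements
values, which count how many top elements each TOS state keeps in
registers, are assumptions mirroring the ARM port):

#include <cassert>

static const int kVirtualElements[] = { 0, 1, 1, 2, 2 };
static const int kPointerSize = 4;

// Byte offset from sp of logical element 'index' (0 is the TOS).
int ElementOffset(int index, int state) {
  int adjusted_index = index - kVirtualElements[state];
  assert(adjusted_index >= 0);  // Elements held in registers have no slot.
  return adjusted_index * kPointerSize;
}

int main() {
  assert(ElementOffset(0, 0) == 0);  // NO_TOS_REGISTERS: TOS is at [sp].
  assert(ElementOffset(2, 3) == 0);  // A1_A0_TOS: two in regs, third at [sp].
  return 0;
}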
 
-  // Random-access store to a frame-top relative frame element. The result
-  // becomes owned by the frame and is invalidated.
-  void SetElementAt(int index, Result* value);
-
-  // Set a frame element to a constant. The index is frame-top relative.
-  void SetElementAt(int index, Handle<Object> value) {
-    Result temp(value);
-    SetElementAt(index, &temp);
+  bool KnownSmiAt(int index) {
+    if (index >= kTOSKnownSmiMapSize) return false;
+    return (tos_known_smi_map_ & (1 << index)) != 0;
   }
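
A standalone sketch of the KnownSmiAt() lookup (illustrative only): bit i
of the map records whether the element i slots down from the top is a
known smi, and only the top kTOSKnownSmiMapSize elements are tracked:

#include <cassert>

static const int kTOSKnownSmiMapSize = 4;

bool KnownSmiAt(unsigned tos_known_smi_map, int index) {
  if (index >= kTOSKnownSmiMapSize) return false;  // Beyond the window.
  return (tos_known_smi_map & (1 << index)) != 0;
}

int main() {
  unsigned map = 0x5;             // TOS and the element two slots down.
  assert(KnownSmiAt(map, 0));
  assert(!KnownSmiAt(map, 1));
  assert(!KnownSmiAt(map, 7));    // Outside the tracked window.
  return 0;
}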
-
-  void PushElementAt(int index) {
-    PushFrameSlotAt(element_count() - index - 1);
-  }
-
   // A frame-allocated local as an assembly operand.
-  MemOperand LocalAt(int index) {
-    ASSERT(0 <= index);
-    ASSERT(index < local_count());
-    return MemOperand(s8_fp, kLocal0Offset - index * kPointerSize);
-  }
-
-  // Push a copy of the value of a local frame slot on top of the frame.
-  void PushLocalAt(int index) {
-    PushFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the value of a local frame slot on top of the frame and invalidate
-  // the local slot. The slot should be written to before trying to read
-  // from it again.
-  void TakeLocalAt(int index) {
-    TakeFrameSlotAt(local0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a local frame slot. The
-  // value is left in place on top of the frame.
-  void StoreToLocalAt(int index) {
-    StoreToFrameSlotAt(local0_index() + index);
-  }
+  inline MemOperand LocalAt(int index);
 
   // Push the address of the receiver slot on the frame.
   void PushReceiverSlotAddress();
 
   // The function frame slot.
-  MemOperand Function() { return MemOperand(s8_fp, kFunctionOffset); }
-
-  // Push the function on top of the frame.
-  void PushFunction() { PushFrameSlotAt(function_index()); }
+  MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
 
   // The context frame slot.
-  MemOperand Context() { return MemOperand(s8_fp, kContextOffset); }
-
-  // Save the value of the cp register to the context frame slot.
-  void SaveContextRegister();
-
-  // Restore the cp register from the value of the context frame
-  // slot.
-  void RestoreContextRegister();
+  MemOperand Context() { return MemOperand(fp, kContextOffset); }
 
   // A parameter as an assembly operand.
-  MemOperand ParameterAt(int index) {
-    // Index -1 corresponds to the receiver.
-    ASSERT(-1 <= index);  // -1 is the receiver.
-    ASSERT(index <= parameter_count());
-    uint16_t a = 0;   // Number of argument slots.
-    return MemOperand(s8_fp, (1 + parameter_count() + a - index) *kPointerSize);
-  }
-
-  // Push a copy of the value of a parameter frame slot on top of the frame.
-  void PushParameterAt(int index) {
-    PushFrameSlotAt(param0_index() + index);
-  }
-
-  // Push the value of a paramter frame slot on top of the frame and
-  // invalidate the parameter slot. The slot should be written to before
-  // trying to read from it again.
-  void TakeParameterAt(int index) {
-    TakeFrameSlotAt(param0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a parameter frame slot.
-  // The value is left in place on top of the frame.
-  void StoreToParameterAt(int index) {
-    StoreToFrameSlotAt(param0_index() + index);
-  }
+  inline MemOperand ParameterAt(int index);
 
   // The receiver frame slot.
-  MemOperand Receiver() { return ParameterAt(-1); }
+  inline MemOperand Receiver();
 
   // Push a try-catch or try-finally handler on top of the virtual frame.
   void PushTryHandler(HandlerType type);
 
   // Call stub given the number of arguments it expects on (and
   // removes from) the stack.
-  void CallStub(CodeStub* stub, int arg_count) {
-    PrepareForCall(arg_count, arg_count);
-    RawCallStub(stub);
-  }
+  inline void CallStub(CodeStub* stub, int arg_count);
 
-  void CallStub(CodeStub* stub, Result* arg);
-
-  void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+  // Call the JS function on top of the stack, with arguments
+  // taken from the stack.
+  void CallJSFunction(int arg_count);
 
   // Call runtime given the number of arguments expected on (and
   // removed from) the stack.
-  void CallRuntime(Runtime::Function* f, int arg_count);
+  void CallRuntime(const Runtime::Function* f, int arg_count);
   void CallRuntime(Runtime::FunctionId id, int arg_count);
 
-  // Call runtime with sp aligned to 8 bytes.
-  void CallAlignedRuntime(Runtime::Function* f, int arg_count);
-  void CallAlignedRuntime(Runtime::FunctionId id, int arg_count);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  void DebugBreak();
+#endif
 
   // Invoke builtin given the number of arguments it expects on (and
   // removes from) the stack.
   void InvokeBuiltin(Builtins::JavaScript id,
                      InvokeJSFlags flag,
-                     Result* arg_count_register,
                      int arg_count);
 
+  // Call load IC. Receiver is on the stack and is consumed. Result is returned
+  // in v0.
+  void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
+
+  // Call store IC. If the store is contextual, value is found on top of the
+  // frame. If not, value and receiver are on the frame. Both are consumed.
+  // Result is returned in v0.
+  void CallStoreIC(Handle<String> name, bool is_contextual);
+
+  // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
+  // Result is returned in v0.
+  void CallKeyedLoadIC();
+
+  // Call keyed store IC. Value, key and receiver are on the stack. All three
+  // are consumed. Result is returned in v0 (and a0).
+  void CallKeyedStoreIC();
+
   // Call into an IC stub given the number of arguments it removes
-  // from the stack. Register arguments are passed as results and
-  // consumed by the call.
+  // from the stack. Register arguments to the IC stub are implicit,
+  // and depend on the type of IC stub.
   void CallCodeObject(Handle<Code> ic,
                       RelocInfo::Mode rmode,
                       int dropped_args);
-  void CallCodeObject(Handle<Code> ic,
-                      RelocInfo::Mode rmode,
-                      Result* arg,
-                      int dropped_args);
-  void CallCodeObject(Handle<Code> ic,
-                      RelocInfo::Mode rmode,
-                      Result* arg0,
-                      Result* arg1,
-                      int dropped_args,
-                      bool set_auto_args_slots = false);
 
   // Drop a number of elements from the top of the expression stack. May
   // emit code to affect the physical frame. Does not clobber any registers
   // excepting possibly the stack pointer.
   void Drop(int count);
-  // Similar to VirtualFrame::Drop but we don't modify the actual stack.
-  // This is because we need to manually restore sp to the correct position.
-  void DropFromVFrameOnly(int count);
 
   // Drop one element.
   void Drop() { Drop(1); }
-  void DropFromVFrameOnly() { DropFromVFrameOnly(1); }
 
-  // Duplicate the top element of the frame.
-  void Dup() { PushFrameSlotAt(element_count() - 1); }
+  // Pop an element from the top of the expression stack. Discards
+  // the result.
+  void Pop();
 
-  // Pop an element from the top of the expression stack. Returns a
-  // Result, which may be a constant or a register.
-  Result Pop();
+  // Pop an element from the top of the expression stack.  The register
+  // returned will be one normally used for top-of-stack register
+  // allocation, so you can't hold on to it if you push on the stack.
+  Register PopToRegister(Register but_not_to_this_one = no_reg);
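
A toy model of the PopToRegister() contract (illustrative; preferring a0
is a hypothetical policy, not taken from the patch): the result is always
one of the TOS registers, and the argument lets a caller that already
holds one of them demand the other, e.g. when popping both operands of a
binary operation:

#include <cassert>

enum Reg { no_reg = -1, a0 = 0, a1 = 1 };

Reg PopToRegister(Reg but_not_to_this_one = no_reg) {
  // Hypothetical policy: hand out a0 unless the caller excluded it.
  return (but_not_to_this_one == a0) ? a1 : a0;
}

int main() {
  Reg rhs = PopToRegister();     // First operand; say it lands in a0.
  Reg lhs = PopToRegister(rhs);  // Second operand; guaranteed distinct.
  assert(lhs != rhs);
  return 0;
}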
+
+  // Look at the top of the stack.  The register returned is aliased and
+  // must be copied to a scratch register before modification.
+  Register Peek();
+
+  // Look at the value beneath the top of the stack. The register returned is
+  // aliased and must be copied to a scratch register before modification.
+  Register Peek2();
+
+  // Duplicate the top of stack.
+  void Dup();
+
+  // Duplicate the two elements on top of stack.
+  void Dup2();
+
+  // Flushes all registers, leaving a copy of the top of stack in a0.
+  void SpillAllButCopyTOSToA0();
+
+  // Flushes all registers, leaving a copy of the top of stack in a1.
+  void SpillAllButCopyTOSToA1();
+
+  // Flushes all registers, leaving a copy of the top of stack in a1
+  // and a copy of the next value on the stack in a0.
+  void SpillAllButCopyTOSToA1A0();
 
   // Pop and save an element from the top of the expression stack and
   // emit a corresponding pop instruction.
@@ -355,40 +370,41 @@
   void EmitMultiPop(RegList regs);
   void EmitMultiPopReversed(RegList regs);
 
+
+  // Takes the top two elements and puts them in a0 (top element) and a1
+  // (second element).
+  void PopToA1A0();
+
+  // Takes the top element and puts it in a1.
+  void PopToA1();
+
+  // Takes the top element and puts it in a0.
+  void PopToA0();
+
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
-  void EmitPush(Register reg);
+  void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
+  void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
+  void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
+  void EmitPushRoot(Heap::RootListIndex index);
+
+  // Overwrite the nth element from the top of the stack.  If the nth
+  // position is in a register then this turns into a register move,
+  // otherwise an sw.  Afterwards you can still use the register, even if
+  // it is a register that can be used for the TOS (a0 or a1).
+  void SetElementAt(Register reg, int this_far_down);
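
A toy model of the dispatch SetElementAt() describes (illustrative only;
the real method also updates the TOS bookkeeping): positions still held
in the TOS registers get a register move, deeper positions an sw
relative to sp:

#include <cassert>
#include <string>

// in_registers: how many of the top elements live in a0/a1 (0..2).
std::string SetElementAtAction(int this_far_down, int in_registers) {
  if (this_far_down < in_registers) return "move";  // Target is a register.
  return "sw";  // Target is in memory, addressed relative to sp.
}

int main() {
  assert(SetElementAtAction(0, 2) == "move");  // TOS held in a register.
  assert(SetElementAtAction(3, 2) == "sw");    // Deep slot is in memory.
  return 0;
}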
+
+  // Get a register which is free and which must be immediately used to
+  // push on the top of the stack.
+  Register GetTOSRegister();
+
   // Same but for multiple registers.
   void EmitMultiPush(RegList regs);
   void EmitMultiPushReversed(RegList regs);
 
-  // Push an element on the virtual frame.
-  inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
-  inline void Push(Handle<Object> value);
-  inline void Push(Smi* value);
-
-  // Pushing a result invalidates it (its contents become owned by the frame).
-  void Push(Result* result) {
-    if (result->is_register()) {
-      Push(result->reg());
-    } else {
-      ASSERT(result->is_constant());
-      Push(result->handle());
-    }
-    result->Unuse();
-  }
-
-  // Nip removes zero or more elements from immediately below the top
-  // of the frame, leaving the previous top-of-frame value on top of
-  // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  inline void Nip(int num_dropped);
-
-  // This pushes 4 arguments slots on the stack and saves asked 'a' registers
-  // 'a' registers are arguments register a0 to a3.
-  void EmitArgumentSlots(RegList reglist);
-
-  inline void SetTypeForLocalAt(int index, NumberInfo info);
-  inline void SetTypeForParamAt(int index, NumberInfo info);
+  static Register scratch0() { return t4; }
+  static Register scratch1() { return t5; }
+  static Register scratch2() { return t6; }
 
  private:
   static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@@ -398,24 +414,51 @@
   static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
   static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
 
-  ZoneList<FrameElement> elements_;
+  // 5 states for the top of stack, which can be in memory or in a0 and a1.
+  enum TopOfStack { NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS,
+                    TOS_STATES};
+  static const int kMaxTOSRegisters = 2;
+
+  static const bool kA0InUse[TOS_STATES];
+  static const bool kA1InUse[TOS_STATES];
+  static const int kVirtualElements[TOS_STATES];
+  static const TopOfStack kStateAfterPop[TOS_STATES];
+  static const TopOfStack kStateAfterPush[TOS_STATES];
+  static const Register kTopRegister[TOS_STATES];
+  static const Register kBottomRegister[TOS_STATES];
+
+  // We allocate up to 5 locals in registers.
+  static const int kNumberOfAllocatedRegisters = 5;
+  // a2, a3 and t0 to t2 (numbers 2 to 6) are allocated to locals.
+  static const int kFirstAllocatedRegister = 2;
+
+  static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
+
+  static Register AllocatedRegister(int r) {
+    ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
+    return kAllocatedRegisters[r];
+  }
+
+  // The number of elements on the stack frame.
+  int element_count_;
+  TopOfStack top_of_stack_state_:3;
+  int register_allocation_map_:kNumberOfAllocatedRegisters;
+  static const int kTOSKnownSmiMapSize = 4;
+  unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
 
   // The index of the element that is at the processor's stack pointer
-  // (the sp register).
-  int stack_pointer_;
-
-  // The index of the register frame element using each register, or
-  // kIllegalIndex if a register is not on the frame.
-  int register_locations_[RegisterAllocator::kNumRegisters];
+  // (the sp register).  For now, since everything is in memory, it is given
+  // by the number of elements on the not-very-virtual stack frame.
+  int stack_pointer() { return element_count_ - 1; }
 
   // The number of frame-allocated locals and parameters respectively.
-  int parameter_count() { return cgen()->scope()->num_parameters(); }
-  int local_count() { return cgen()->scope()->num_stack_slots(); }
+  inline int parameter_count() const;
+  inline int local_count() const;
 
   // The index of the element that is at the processor's frame pointer
   // (the fp register). The parameters, receiver, function, and context
   // are below the frame pointer.
-  int frame_pointer() { return parameter_count() + 3; }
+  inline int frame_pointer() const;
 
   // The index of the first parameter. The receiver lies below the first
   // parameter.
@@ -423,75 +466,22 @@
 
   // The index of the context slot in the frame. It is immediately
   // below the frame pointer.
-  int context_index() { return frame_pointer() - 1; }
+  inline int context_index();
 
   // The index of the function slot in the frame. It is below the frame
   // pointer and context slot.
-  int function_index() { return frame_pointer() - 2; }
+  inline int function_index();
 
   // The index of the first local. Between the frame pointer and the
   // locals lies the return address.
-  int local0_index() { return frame_pointer() + 2; }
+  inline int local0_index() const;
 
   // The index of the base of the expression stack.
-  int expression_base_index() { return local0_index() + local_count(); }
+  inline int expression_base_index() const;
 
   // Convert a frame index into a frame pointer relative offset into the
   // actual stack.
-  int fp_relative(int index) {
-    ASSERT(index < element_count());
-    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
-    return (frame_pointer() - index) * kPointerSize;
-  }
-
-  // Record an occurrence of a register in the virtual frame. This has the
-  // effect of incrementing the register's external reference count and
-  // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index) {
-    ASSERT(!is_used(reg));
-    set_register_location(reg, index);
-    cgen()->allocator()->Use(reg);
-  }
-
-  // Record that a register reference has been dropped from the frame. This
-  // decrements the register's external reference count and invalidates the
-  // index of the register's location in the frame.
-  void Unuse(Register reg) {
-    ASSERT(is_used(reg));
-    set_register_location(reg, kIllegalIndex);
-    cgen()->allocator()->Unuse(reg);
-  }
-
-  // Spill the element at a particular index---write it to memory if
-  // necessary, free any associated register, and forget its value if
-  // constant.
-  void SpillElementAt(int index);
-
-  // Sync the element at a particular index. If it is a register or
-  // constant that disagrees with the value on the stack, write it to memory.
-  // Keep the element type as register or constant, and clear the dirty bit.
-  void SyncElementAt(int index);
-
-  // Sync the range of elements in [begin, end] with memory.
-  void SyncRange(int begin, int end);
-
-  // Sync a single unsynced element that lies beneath or at the stack pointer.
-  void SyncElementBelowStackPointer(int index);
-
-  // Sync a single unsynced element that lies just above the stack pointer.
-  void SyncElementByPushing(int index);
-
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame.
-  inline void PushFrameSlotAt(int index);
-
-  // Push a the value of a frame slot (typically a local or parameter) on
-  // top of the frame and invalidate the slot.
-  void TakeFrameSlotAt(int index);
-
-  // Store the value on top of the frame to a frame slot (typically a local
-  // or parameter).
-  void StoreToFrameSlotAt(int index);
+  inline int fp_relative(int index);
 
   // Spill all elements in registers. Spill the top spilled_args elements
   // on the frame. Sync all other frame elements.
@@ -499,45 +489,37 @@
   // the effect of an upcoming call that will drop them from the stack.
   void PrepareForCall(int spilled_args, int dropped_args);
 
-  // Move frame elements currently in registers or constants, that
-  // should be in memory in the expected frame, to memory.
-  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+  // If all top-of-stack registers are in use then the lowest one is pushed
+  // onto the physical stack and made free.
+  void EnsureOneFreeTOSRegister();
 
-  // Make the register-to-register moves necessary to
-  // merge this frame with the expected frame.
-  // Register to memory moves must already have been made,
-  // and memory to register moves must follow this call.
-  // This is because some new memory-to-register moves are
-  // created in order to break cycles of register moves.
-  // Used in the implementation of MergeTo().
-  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+  // Emit instructions to get the top of stack state from where we are to where
+  // we want to be.
+  void MergeTOSTo(TopOfStack expected_state,
+                  Condition cond = al,
+                  Register r1 = no_reg,
+                  const Operand& r2 = Operand(no_reg));
 
-  // Make the memory-to-register and constant-to-register moves
-  // needed to make this frame equal the expected frame.
-  // Called after all register-to-memory and register-to-register
-  // moves have been made. After this function returns, the frames
-  // should be equal.
-  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+  inline bool Equals(const VirtualFrame* other);
 
-  // Invalidates a frame slot (puts an invalid frame element in it).
-  // Copies on the frame are correctly handled, and if this slot was
-  // the backing store of copies, the index of the new backing store
-  // is returned. Otherwise, returns kIllegalIndex.
-  // Register counts are correctly updated.
-  int InvalidateFrameSlotAt(int index);
+  inline void LowerHeight(int count) {
+    element_count_ -= count;
+    if (count >= kTOSKnownSmiMapSize) {
+      tos_known_smi_map_ = 0;
+    } else {
+      tos_known_smi_map_ >>= count;
+    }
+  }
 
-  // Call a code stub that has already been prepared for calling (via
-  // PrepareForCall).
-  void RawCallStub(CodeStub* stub);
-
-  // Calls a code object which has already been prepared for calling
-  // (via PrepareForCall).
-  void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
-  inline bool Equals(VirtualFrame* other);
-
-  // Classes that need raw access to the elements_ array.
-  friend class DeferredCode;
+  inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
+    ASSERT(known_smi_map < (1u << count));
+    element_count_ += count;
+    if (count >= kTOSKnownSmiMapSize) {
+      tos_known_smi_map_ = known_smi_map;
+    } else {
+      tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
+    }
+  }
   friend class JumpTarget;
 };
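
LowerHeight() and RaiseHeight() keep tos_known_smi_map_ aligned with the
stack: a push shifts the map left and ors in the new knowledge, a pop
shifts it right. A standalone sketch of that behavior (illustrative; in
the real class the map is a 4-bit bitfield, so bits shifted past the top
are dropped automatically):

#include <cassert>

static const int kTOSKnownSmiMapSize = 4;
static unsigned tos_known_smi_map = 0;
static int element_count = 0;

void RaiseHeight(int count, unsigned known_smi_map = 0) {
  assert(known_smi_map < (1u << count));
  element_count += count;
  if (count >= kTOSKnownSmiMapSize) {
    tos_known_smi_map = known_smi_map;
  } else {
    tos_known_smi_map = (tos_known_smi_map << count) | known_smi_map;
  }
}

void LowerHeight(int count) {
  element_count -= count;
  if (count >= kTOSKnownSmiMapSize) {
    tos_known_smi_map = 0;
  } else {
    tos_known_smi_map >>= count;
  }
}

int main() {
  RaiseHeight(1, 1);                     // Push a known smi: map = 0b01.
  RaiseHeight(1, 0);                     // Push an unknown: map = 0b10.
  assert((tos_known_smi_map & 1) == 0);  // TOS not known to be a smi.
  LowerHeight(1);                        // Pop it: map = 0b01 again.
  assert((tos_known_smi_map & 1) == 1);  // TOS is a known smi once more.
  return 0;
}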