Update V8 to r4730 as required by WebKit r60469
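
Pull in the upstream ARM assembler changes from V8 r4730:

- Generalize the push/pop elimination flags into a peephole optimizer
  (FLAG_peephole_optimization / FLAG_print_peephole_optimization) that
  also rewrites str/ldr pairs on fp-relative slots and push/ldr/pop
  triples, guarded by the new Assembler::can_peephole_optimize().
- Change ldrd/strd to take an explicit register pair and require ARMv7;
  drop the unused swp/swpb, lea, and Operand(const char*) definitions.
- Add RelocInfo::Visit, RelocInfo::target_address_size, and
  Register::set_code; wrap assembler-arm.cc in V8_TARGET_ARCH_ARM.
- Delete the abandoned Thumb2 assembler sources.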
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 3f0854e..e292cef 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -39,6 +39,7 @@
 
 #include "arm/assembler-arm.h"
 #include "cpu.h"
+#include "debug.h"
 
 
 namespace v8 {
@@ -73,6 +74,11 @@
 }
 
 
+int RelocInfo::target_address_size() {
+  return Assembler::kExternalTargetSize;
+}
+
+
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
@@ -162,6 +168,26 @@
 }
 
 
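+// Dispatch on the relocation mode, invoking the ObjectVisitor callback
+// that matches the pointer embedded at this relocation.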
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitPointer(target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (Debug::has_break_points() &&
+             RelocInfo::IsJSReturn(mode) &&
+             IsPatchedReturnSequence()) {
+    visitor->VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
   rm_ = no_reg;
   imm32_ = immediate;
@@ -169,13 +195,6 @@
 }
 
 
-Operand::Operand(const char* s) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(s);
-  rmode_ = RelocInfo::EMBEDDED_STRING;
-}
-
-
 Operand::Operand(const ExternalReference& f)  {
   rm_ = no_reg;
   imm32_ = reinterpret_cast<int32_t>(f.address());
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index f1f59ce..050e15b 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -36,6 +36,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "arm/assembler-arm-inl.h"
 #include "serialize.h"
 
@@ -106,6 +108,15 @@
 const int RelocInfo::kApplyMask = 0;
 
 
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially coded.  Being
+  // specially coded on ARM means that it is a movw/movt instruction.  We don't
+  // generate those yet.
+  return false;
+}
+
+
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -268,6 +279,20 @@
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
     B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+// A mask for the Rd register for push, pop, ldr, str instructions.
+const Instr kRdMask = 0x0000f000;
+static const int kRdShift = 12;
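+// Patterns for ldr/str rd, [fp, #+/-offset]; the Rd register and the
+// 12-bit immediate offset are not encoded.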
+static const Instr kLdrRegFpOffsetPattern =
+    al | B26 | L | Offset | fp.code() * B16;
+static const Instr kStrRegFpOffsetPattern =
+    al | B26 | Offset | fp.code() * B16;
+static const Instr kLdrRegFpNegOffsetPattern =
+    al | B26 | L | NegOffset | fp.code() * B16;
+static const Instr kStrRegFpNegOffsetPattern =
+    al | B26 | NegOffset | fp.code() * B16;
+static const Instr kLdrStrInstrTypeMask = 0xffff0000;
+static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
+static const Instr kLdrStrOffsetMask = 0x00000fff;
 
 // Spare buffer.
 static const int kMinimalBufferSize = 4*KB;
@@ -395,6 +420,43 @@
 }
 
 
+Register Assembler::GetRd(Instr instr) {
+  Register reg;
+  reg.code_ = ((instr & kRdMask) >> kRdShift);
+  return reg;
+}
+
+
+bool Assembler::IsPush(Instr instr) {
+  return ((instr & ~kRdMask) == kPushRegPattern);
+}
+
+
+bool Assembler::IsPop(Instr instr) {
+  return ((instr & ~kRdMask) == kPopRegPattern);
+}
+
+
+bool Assembler::IsStrRegFpOffset(Instr instr) {
+  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsLdrRegFpOffset(Instr instr) {
+  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsStrRegFpNegOffset(Instr instr) {
+  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
+}
+
+
+bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
+  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
+}
+
+
 // Labels refer to positions in the (to be) generated code.
 // There are bound, linked, and unused labels.
 //
@@ -887,15 +949,12 @@
   //   str(src, MemOperand(sp, 4, NegPreIndex), al);
   //   add(sp, sp, Operand(kPointerSize));
   // Both instructions can be eliminated.
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-      last_bound_pos_ <= (pc_offset() - pattern_size) &&
-      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+  if (can_peephole_optimize(2) &&
       // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
       (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
     pc_ -= 2 * kInstrSize;
-    if (FLAG_print_push_pop_elimination) {
+    if (FLAG_print_peephole_optimization) {
       PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
     }
   }
@@ -1086,20 +1145,170 @@
   }
   addrmod2(cond | B26 | L, dst, src);
 
-  // Eliminate pattern: push(r), pop(r)
-  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
-  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
-  // Both instructions can be eliminated.
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-      last_bound_pos_ <= (pc_offset() - pattern_size) &&
-      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
-      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
-    pc_ -= 2 * kInstrSize;
-    if (FLAG_print_push_pop_elimination) {
-      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+  // Eliminate pattern: push(ry), pop(rx)
+  //   str(ry, MemOperand(sp, 4, NegPreIndex), al)
+  //   ldr(rx, MemOperand(sp, 4, PostIndex), al)
+  // Both instructions can be eliminated if ry == rx.
+  // If ry != rx, a register copy from ry to rx is inserted
+  // after eliminating the push and the pop instructions.
+  Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
+  Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
+
+  if (can_peephole_optimize(2) &&
+      IsPush(push_instr) &&
+      IsPop(pop_instr)) {
+    if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
+      // For consecutive push and pop on different registers,
+      // we delete both the push & pop and insert a register move.
+      // push ry, pop rx --> mov rx, ry
+      Register reg_pushed, reg_popped;
+      reg_pushed = GetRd(push_instr);
+      reg_popped = GetRd(pop_instr);
+      pc_ -= 2 * kInstrSize;
+      // Insert a mov instruction, which is better than a pair of push & pop.
+      mov(reg_popped, reg_pushed);
+      if (FLAG_print_peephole_optimization) {
+        PrintF("%x push/pop (diff reg) replaced by a reg move\n", pc_offset());
+      }
+    } else {
+      // For consecutive push and pop on the same register,
+      // both the push and the pop can be deleted.
+      pc_ -= 2 * kInstrSize;
+      if (FLAG_print_peephole_optimization) {
+        PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+      }
+    }
+  }
+
+  if (can_peephole_optimize(2)) {
+    Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
+    Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
+
+    if ((IsStrRegFpOffset(str_instr) &&
+         IsLdrRegFpOffset(ldr_instr)) ||
+        (IsStrRegFpNegOffset(str_instr) &&
+         IsLdrRegFpNegOffset(ldr_instr))) {
+      if ((ldr_instr & kLdrStrInstrArgumentMask) ==
+            (str_instr & kLdrStrInstrArgumentMask)) {
+        // Pattern: Ldr/str same fp+offset, same register.
+        //
+        // The following:
+        // str rx, [fp, #-12]
+        // ldr rx, [fp, #-12]
+        //
+        // Becomes:
+        // str rx, [fp, #-12]
+
+        pc_ -= 1 * kInstrSize;
+        if (FLAG_print_peephole_optimization) {
+          PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
+        }
+      } else if ((ldr_instr & kLdrStrOffsetMask) ==
+                 (str_instr & kLdrStrOffsetMask)) {
+        // Pattern: Ldr/str same fp+offset, different register.
+        //
+        // The following:
+        // str rx, [fp, #-12]
+        // ldr ry, [fp, #-12]
+        //
+        // Becomes:
+        // str rx, [fp, #-12]
+        // mov ry, rx
+
+        Register reg_stored, reg_loaded;
+        reg_stored = GetRd(str_instr);
+        reg_loaded = GetRd(ldr_instr);
+        pc_ -= 1 * kInstrSize;
+        // Insert a mov instruction, which is better than ldr.
+        mov(reg_loaded, reg_stored);
+        if (FLAG_print_peephole_optimization) {
+          PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
+        }
+      }
+    }
+  }
+
+  if (can_peephole_optimize(3)) {
+    Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
+    Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
+    Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
+    if (IsPush(mem_write_instr) &&
+        IsPop(mem_read_instr)) {
+      if (IsLdrRegFpOffset(ldr_instr) ||
+          IsLdrRegFpNegOffset(ldr_instr)) {
+        if ((mem_write_instr & kRdMask) ==
+            (mem_read_instr & kRdMask)) {
+          // Pattern: push & pop from/to same register,
+          // with a fp+offset ldr in between
+          //
+          // The following:
+          // str rx, [sp, #-4]!
+          // ldr rz, [fp, #-24]
+          // ldr rx, [sp], #+4
+          //
+          // Becomes:
+          // if (rx == rz)
+          //   delete all
+          // else
+          //   ldr rz, [fp, #-24]
+
+          if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
+            pc_ -= 3 * kInstrSize;
+          } else {
+            pc_ -= 3 * kInstrSize;
+            // Reinsert back the ldr rz.
+            emit(ldr_instr);
+          }
+          if (FLAG_print_peephole_optimization) {
+            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
+          }
+        } else {
+          // Pattern: push & pop from/to different registers
+          // with a fp+offset ldr in between
+          //
+          // The following:
+          // str rx, [sp, #-4]!
+          // ldr rz, [fp, #-24]
+          // ldr ry, [sp], #+4
+          //
+          // Becomes:
+          // if (ry == rz)
+          //   mov ry, rx
+          // else if (rx != rz)
+          //   ldr rz, [fp, #-24]
+          //   mov ry, rx
+          // else  (that is, ry != rz and rx == rz)
+          //   mov ry, rx
+          //   ldr rz, [fp, #-24]
+
+          Register reg_pushed, reg_popped;
+          if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
+            reg_pushed = GetRd(mem_write_instr);
+            reg_popped = GetRd(mem_read_instr);
+            pc_ -= 3 * kInstrSize;
+            mov(reg_popped, reg_pushed);
+          } else if ((mem_write_instr & kRdMask) !=
+                     (ldr_instr & kRdMask)) {
+            reg_pushed = GetRd(mem_write_instr);
+            reg_popped = GetRd(mem_read_instr);
+            pc_ -= 3 * kInstrSize;
+            emit(ldr_instr);
+            mov(reg_popped, reg_pushed);
+          } else if (((mem_read_instr & kRdMask) !=
+                      (ldr_instr & kRdMask)) ||
+                     ((mem_write_instr & kRdMask) ==
+                      (ldr_instr & kRdMask))) {
+            reg_pushed = GetRd(mem_write_instr);
+            reg_popped = GetRd(mem_read_instr);
+            pc_ -= 3 * kInstrSize;
+            mov(reg_popped, reg_pushed);
+            emit(ldr_instr);
+          }
+          if (FLAG_print_peephole_optimization) {
+            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
+          }
+        }
+      }
     }
   }
 }
@@ -1111,16 +1320,13 @@
   // Eliminate pattern: pop(), push(r)
   //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
   // ->  str r, [sp, 0], al
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-     last_bound_pos_ <= (pc_offset() - pattern_size) &&
-     reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
     pc_ -= 2 * kInstrSize;
     emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
-    if (FLAG_print_push_pop_elimination) {
+    if (FLAG_print_peephole_optimization) {
       PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
     }
   }
@@ -1157,33 +1363,25 @@
 }
 
 
-void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) {
+void Assembler::ldrd(Register dst1, Register dst2,
+                     const MemOperand& src, Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
   ASSERT(src.rm().is(no_reg));
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
-  addrmod3(cond | B7 | B6 | B4, dst, src);
-#else
-  ldr(dst, src, cond);
-  MemOperand src1(src);
-  src1.set_offset(src1.offset() + 4);
-  Register dst1(dst);
-  dst1.code_ = dst1.code_ + 1;
-  ldr(dst1, src1, cond);
-#endif
+  ASSERT(!dst1.is(lr));  // r14.
+  ASSERT_EQ(0, dst1.code() % 2);
+  ASSERT_EQ(dst1.code() + 1, dst2.code());
+  addrmod3(cond | B7 | B6 | B4, dst1, src);
 }
 
 
-void Assembler::strd(Register src, const MemOperand& dst, Condition cond) {
+void Assembler::strd(Register src1, Register src2,
+                     const MemOperand& dst, Condition cond) {
   ASSERT(dst.rm().is(no_reg));
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
-  addrmod3(cond | B7 | B6 | B5 | B4, src, dst);
-#else
-  str(src, dst, cond);
-  MemOperand dst1(dst);
-  dst1.set_offset(dst1.offset() + 4);
-  Register src1(src);
-  src1.code_ = src1.code_ + 1;
-  str(src1, dst1, cond);
-#endif
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
+  ASSERT(!src1.is(lr));  // r14.
+  ASSERT_EQ(0, src1.code() % 2);
+  ASSERT_EQ(src1.code() + 1, src2.code());
+  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
 }
 
 // Load/Store multiple instructions.
@@ -1216,26 +1414,6 @@
 }
 
 
-// Semaphore instructions.
-void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
-  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
-  ASSERT(!dst.is(base) && !src.is(base));
-  emit(cond | P | base.code()*B16 | dst.code()*B12 |
-       B7 | B4 | src.code());
-}
-
-
-void Assembler::swpb(Register dst,
-                     Register src,
-                     Register base,
-                     Condition cond) {
-  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
-  ASSERT(!dst.is(base) && !src.is(base));
-  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
-       B7 | B4 | src.code());
-}
-
-
 // Exception-generating instructions and debugging support.
 void Assembler::stop(const char* msg) {
 #ifndef __arm__
@@ -1779,34 +1957,6 @@
 }
 
 
-void Assembler::lea(Register dst,
-                    const MemOperand& x,
-                    SBit s,
-                    Condition cond) {
-  int am = x.am_;
-  if (!x.rm_.is_valid()) {
-    // Immediate offset.
-    if ((am & P) == 0)  // post indexing
-      mov(dst, Operand(x.rn_), s, cond);
-    else if ((am & U) == 0)  // negative indexing
-      sub(dst, x.rn_, Operand(x.offset_), s, cond);
-    else
-      add(dst, x.rn_, Operand(x.offset_), s, cond);
-  } else {
-    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
-    // register offset the constructors make sure than both shift_imm_
-    // and shift_op_ are initialized.
-    ASSERT(!x.rm_.is(pc));
-    if ((am & P) == 0)  // post indexing
-      mov(dst, Operand(x.rn_), s, cond);
-    else if ((am & U) == 0)  // negative indexing
-      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
-    else
-      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
-  }
-}
-
-
 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   uint32_t dummy1;
   uint32_t dummy2;
@@ -2062,3 +2212,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 61b84d4..a1b98f6 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -80,6 +80,11 @@
     return 1 << code_;
   }
 
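+  // Change the register's code; the new code must denote a valid register.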
+  void set_code(int code) {
+    code_ = code;
+    ASSERT(is_valid());
+  }
+
   // Unfortunately we can't make this private in a struct.
   int code_;
 };
@@ -458,7 +463,8 @@
       return offset_;
   }
 
-  Register rm() const {return rm_;}
+  Register rn() const { return rn_; }
+  Register rm() const { return rm_; }
 
  private:
   Register rn_;  // base
@@ -767,17 +773,17 @@
   void strh(Register src, const MemOperand& dst, Condition cond = al);
   void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
   void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
-  void ldrd(Register dst, const MemOperand& src, Condition cond = al);
-  void strd(Register src, const MemOperand& dst, Condition cond = al);
+  void ldrd(Register dst1,
+            Register dst2,
+            const MemOperand& src, Condition cond = al);
+  void strd(Register src1,
+            Register src2,
+            const MemOperand& dst, Condition cond = al);
 
   // Load/Store multiple instructions
   void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
   void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
 
-  // Semaphore instructions
-  void swp(Register dst, Register src, Register base, Condition cond = al);
-  void swpb(Register dst, Register src, Register base, Condition cond = al);
-
   // Exception-generating instructions and debugging support
   void stop(const char* msg);
 
@@ -924,10 +930,6 @@
     add(sp, sp, Operand(kPointerSize));
   }
 
-  // Load effective address of memory operand x into register dst
-  void lea(Register dst, const MemOperand& x,
-           SBit s = LeaveCC, Condition cond = al);
-
   // Jump unconditionally to given label.
   void jmp(Label* L) { b(L, al); }
 
@@ -976,6 +978,12 @@
   int current_position() const { return current_position_; }
   int current_statement_position() const { return current_statement_position_; }
 
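+  // Check whether the last |instructions| emitted instructions may be
+  // rewritten: peephole optimization must be enabled, and neither a bound
+  // label nor relocation info may refer into the candidate window.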
+  bool can_peephole_optimize(int instructions) {
+    if (!FLAG_peephole_optimization) return false;
+    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
+    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
+  }
+
   // Read/patch instructions
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   static void instr_at_put(byte* pc, Instr instr) {
@@ -987,6 +995,13 @@
   static bool IsLdrRegisterImmediate(Instr instr);
   static int GetLdrRegisterImmediateOffset(Instr instr);
   static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+  static Register GetRd(Instr instr);
+  static bool IsPush(Instr instr);
+  static bool IsPop(Instr instr);
+  static bool IsStrRegFpOffset(Instr instr);
+  static bool IsLdrRegFpOffset(Instr instr);
+  static bool IsStrRegFpNegOffset(Instr instr);
+  static bool IsLdrRegFpNegOffset(Instr instr);
 
 
  protected:
diff --git a/src/arm/assembler-thumb2-inl.h b/src/arm/assembler-thumb2-inl.h
deleted file mode 100644
index 9e0fc2f..0000000
--- a/src/arm/assembler-thumb2-inl.h
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-
-#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
-#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
-
-#include "arm/assembler-thumb2.h"
-#include "cpu.h"
-
-
-namespace v8 {
-namespace internal {
-
-Condition NegateCondition(Condition cc) {
-  ASSERT(cc != al);
-  return static_cast<Condition>(cc ^ ne);
-}
-
-
-void RelocInfo::apply(intptr_t delta) {
-  if (RelocInfo::IsInternalReference(rmode_)) {
-    // absolute code pointer inside code object moves with the code object.
-    int32_t* p = reinterpret_cast<int32_t*>(pc_);
-    *p += delta;  // relocate entry
-  }
-  // We do not use pc relative addressing on ARM, so there is
-  // nothing else to do.
-}
-
-
-Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_address(Address target) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  Assembler::set_target_address_at(pc_, target);
-}
-
-
-Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return Memory::Object_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Object** RelocInfo::target_object_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_object(Object* target) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
-}
-
-
-Address* RelocInfo::target_reference_address() {
-  ASSERT(rmode_ == EXTERNAL_REFERENCE);
-  return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
-}
-
-
-Address RelocInfo::call_address() {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
-}
-
-
-Object* RelocInfo::call_object() {
-  return *call_object_address();
-}
-
-
-Object** RelocInfo::call_object_address() {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
-  *call_object_address() = target;
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
-  // On ARM a "call instruction" is actually two instructions.
-  //   mov lr, pc
-  //   ldr pc, [pc, #XXX]
-  return (Assembler::instr_at(pc_) == kMovLrPc)
-          && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
-              == kLdrPCPattern);
-}
-
-
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
-  rm_ = no_reg;
-  imm32_ = immediate;
-  rmode_ = rmode;
-}
-
-
-Operand::Operand(const char* s) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(s);
-  rmode_ = RelocInfo::EMBEDDED_STRING;
-}
-
-
-Operand::Operand(const ExternalReference& f)  {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(f.address());
-  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
-  rm_ = no_reg;
-  imm32_ =  reinterpret_cast<intptr_t>(value);
-  rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Register rm) {
-  rm_ = rm;
-  rs_ = no_reg;
-  shift_op_ = LSL;
-  shift_imm_ = 0;
-}
-
-
-bool Operand::is_reg() const {
-  return rm_.is_valid() &&
-         rs_.is(no_reg) &&
-         shift_op_ == LSL &&
-         shift_imm_ == 0;
-}
-
-
-void Assembler::CheckBuffer() {
-  if (buffer_space() <= kGap) {
-    GrowBuffer();
-  }
-  if (pc_offset() >= next_buffer_check_) {
-    CheckConstPool(false, true);
-  }
-}
-
-
-void Assembler::emit(Instr x) {
-  CheckBuffer();
-  *reinterpret_cast<Instr*>(pc_) = x;
-  pc_ += kInstrSize;
-}
-
-
-Address Assembler::target_address_address_at(Address pc) {
-  Address target_pc = pc;
-  Instr instr = Memory::int32_at(target_pc);
-  // If we have a bx instruction, the instruction before the bx is
-  // what we need to patch.
-  static const int32_t kBxInstMask = 0x0ffffff0;
-  static const int32_t kBxInstPattern = 0x012fff10;
-  if ((instr & kBxInstMask) == kBxInstPattern) {
-    target_pc -= kInstrSize;
-    instr = Memory::int32_at(target_pc);
-  }
-  // Verify that the instruction to patch is a
-  // ldr<cond> <Rd>, [pc +/- offset_12].
-  ASSERT((instr & 0x0f7f0000) == 0x051f0000);
-  int offset = instr & 0xfff;  // offset_12 is unsigned
-  if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit defines offset sign
-  // Verify that the constant pool comes after the instruction referencing it.
-  ASSERT(offset >= -4);
-  return target_pc + offset + 8;
-}
-
-
-Address Assembler::target_address_at(Address pc) {
-  return Memory::Address_at(target_address_address_at(pc));
-}
-
-
-void Assembler::set_target_at(Address constant_pool_entry,
-                              Address target) {
-  Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
-  Memory::Address_at(target_address_address_at(pc)) = target;
-  // Intuitively, we would think it is necessary to flush the instruction cache
-  // after patching a target address in the code as follows:
-  //   CPU::FlushICache(pc, sizeof(target));
-  // However, on ARM, no instruction was actually patched by the assignment
-  // above; the target address is not part of an instruction, it is patched in
-  // the constant pool and is read via a data access; the instruction accessing
-  // this address in the constant pool remains unchanged.
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_ASSEMBLER_THUMB2_INL_H_
diff --git a/src/arm/assembler-thumb2.cc b/src/arm/assembler-thumb2.cc
deleted file mode 100644
index e31c429..0000000
--- a/src/arm/assembler-thumb2.cc
+++ /dev/null
@@ -1,1878 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#include "arm/assembler-thumb2-inl.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// Safe default is no features.
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::enabled_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
-void CpuFeatures::Probe() {
-  // If the compiler is allowed to use vfp then we can use vfp too in our
-  // code generation.
-#if !defined(__arm__)
-  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
-  if (FLAG_enable_vfp3) {
-      supported_ |= 1u << VFP3;
-  }
-  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
-  if (FLAG_enable_armv7) {
-      supported_ |= 1u << ARMv7;
-  }
-#else
-  if (Serializer::enabled()) {
-    supported_ |= OS::CpuFeaturesImpliedByPlatform();
-    return;  // No features if we might serialize.
-  }
-
-  if (OS::ArmCpuHasFeature(VFP3)) {
-    // This implementation also sets the VFP flags if
-    // runtime detection of VFP returns true.
-    supported_ |= 1u << VFP3;
-    found_by_runtime_probing_ |= 1u << VFP3;
-  }
-
-  if (OS::ArmCpuHasFeature(ARMv7)) {
-    supported_ |= 1u << ARMv7;
-    found_by_runtime_probing_ |= 1u << ARMv7;
-  }
-#endif
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Register and CRegister
-
-Register no_reg = { -1 };
-
-Register r0  = {  0 };
-Register r1  = {  1 };
-Register r2  = {  2 };
-Register r3  = {  3 };
-Register r4  = {  4 };
-Register r5  = {  5 };
-Register r6  = {  6 };
-Register r7  = {  7 };
-Register r8  = {  8 };  // Used as context register.
-Register r9  = {  9 };
-Register r10 = { 10 };  // Used as roots register.
-Register fp  = { 11 };
-Register ip  = { 12 };
-Register sp  = { 13 };
-Register lr  = { 14 };
-Register pc  = { 15 };
-
-
-CRegister no_creg = { -1 };
-
-CRegister cr0  = {  0 };
-CRegister cr1  = {  1 };
-CRegister cr2  = {  2 };
-CRegister cr3  = {  3 };
-CRegister cr4  = {  4 };
-CRegister cr5  = {  5 };
-CRegister cr6  = {  6 };
-CRegister cr7  = {  7 };
-CRegister cr8  = {  8 };
-CRegister cr9  = {  9 };
-CRegister cr10 = { 10 };
-CRegister cr11 = { 11 };
-CRegister cr12 = { 12 };
-CRegister cr13 = { 13 };
-CRegister cr14 = { 14 };
-CRegister cr15 = { 15 };
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2".
-SwVfpRegister s0  = {  0 };
-SwVfpRegister s1  = {  1 };
-SwVfpRegister s2  = {  2 };
-SwVfpRegister s3  = {  3 };
-SwVfpRegister s4  = {  4 };
-SwVfpRegister s5  = {  5 };
-SwVfpRegister s6  = {  6 };
-SwVfpRegister s7  = {  7 };
-SwVfpRegister s8  = {  8 };
-SwVfpRegister s9  = {  9 };
-SwVfpRegister s10 = { 10 };
-SwVfpRegister s11 = { 11 };
-SwVfpRegister s12 = { 12 };
-SwVfpRegister s13 = { 13 };
-SwVfpRegister s14 = { 14 };
-SwVfpRegister s15 = { 15 };
-SwVfpRegister s16 = { 16 };
-SwVfpRegister s17 = { 17 };
-SwVfpRegister s18 = { 18 };
-SwVfpRegister s19 = { 19 };
-SwVfpRegister s20 = { 20 };
-SwVfpRegister s21 = { 21 };
-SwVfpRegister s22 = { 22 };
-SwVfpRegister s23 = { 23 };
-SwVfpRegister s24 = { 24 };
-SwVfpRegister s25 = { 25 };
-SwVfpRegister s26 = { 26 };
-SwVfpRegister s27 = { 27 };
-SwVfpRegister s28 = { 28 };
-SwVfpRegister s29 = { 29 };
-SwVfpRegister s30 = { 30 };
-SwVfpRegister s31 = { 31 };
-
-DwVfpRegister d0  = {  0 };
-DwVfpRegister d1  = {  1 };
-DwVfpRegister d2  = {  2 };
-DwVfpRegister d3  = {  3 };
-DwVfpRegister d4  = {  4 };
-DwVfpRegister d5  = {  5 };
-DwVfpRegister d6  = {  6 };
-DwVfpRegister d7  = {  7 };
-DwVfpRegister d8  = {  8 };
-DwVfpRegister d9  = {  9 };
-DwVfpRegister d10 = { 10 };
-DwVfpRegister d11 = { 11 };
-DwVfpRegister d12 = { 12 };
-DwVfpRegister d13 = { 13 };
-DwVfpRegister d14 = { 14 };
-DwVfpRegister d15 = { 15 };
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-const int RelocInfo::kApplyMask = 0;
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
-  // Patch the code at the current address with the supplied instructions.
-  Instr* pc = reinterpret_cast<Instr*>(pc_);
-  Instr* instr = reinterpret_cast<Instr*>(instructions);
-  for (int i = 0; i < instruction_count; i++) {
-    *(pc + i) = *(instr + i);
-  }
-
-  // Indicate that code has changed.
-  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
-  // Patch the code at the current address with a call to the target.
-  UNIMPLEMENTED();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand and MemOperand
-// See assembler-thumb2-inl.h for inlined constructors
-
-Operand::Operand(Handle<Object> handle) {
-  rm_ = no_reg;
-  // Verify all Objects referred by code are NOT in new space.
-  Object* obj = *handle;
-  ASSERT(!Heap::InNewSpace(obj));
-  if (obj->IsHeapObject()) {
-    imm32_ = reinterpret_cast<intptr_t>(handle.location());
-    rmode_ = RelocInfo::EMBEDDED_OBJECT;
-  } else {
-    // no relocation needed
-    imm32_ =  reinterpret_cast<intptr_t>(obj);
-    rmode_ = RelocInfo::NONE;
-  }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
-  ASSERT(is_uint5(shift_imm));
-  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
-  rm_ = rm;
-  rs_ = no_reg;
-  shift_op_ = shift_op;
-  shift_imm_ = shift_imm & 31;
-  if (shift_op == RRX) {
-    // encoded as ROR with shift_imm == 0
-    ASSERT(shift_imm == 0);
-    shift_op_ = ROR;
-    shift_imm_ = 0;
-  }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
-  ASSERT(shift_op != RRX);
-  rm_ = rm;
-  rs_ = no_reg;
-  shift_op_ = shift_op;
-  rs_ = rs;
-}
-
-
-MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
-  rn_ = rn;
-  rm_ = no_reg;
-  offset_ = offset;
-  am_ = am;
-}
-
-MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
-  rn_ = rn;
-  rm_ = rm;
-  shift_op_ = LSL;
-  shift_imm_ = 0;
-  am_ = am;
-}
-
-
-MemOperand::MemOperand(Register rn, Register rm,
-                       ShiftOp shift_op, int shift_imm, AddrMode am) {
-  ASSERT(is_uint5(shift_imm));
-  rn_ = rn;
-  rm_ = rm;
-  shift_op_ = shift_op;
-  shift_imm_ = shift_imm & 31;
-  am_ = am;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Instruction encoding bits.
-enum {
-  H   = 1 << 5,   // halfword (or byte)
-  S6  = 1 << 6,   // signed (or unsigned)
-  L   = 1 << 20,  // load (or store)
-  S   = 1 << 20,  // set condition code (or leave unchanged)
-  W   = 1 << 21,  // writeback base register (or leave unchanged)
-  A   = 1 << 21,  // accumulate in multiply instruction (or not)
-  B   = 1 << 22,  // unsigned byte (or word)
-  N   = 1 << 22,  // long (or short)
-  U   = 1 << 23,  // positive (or negative) offset/index
-  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
-  I   = 1 << 25,  // immediate shifter operand (or not)
-
-  B4  = 1 << 4,
-  B5  = 1 << 5,
-  B6  = 1 << 6,
-  B7  = 1 << 7,
-  B8  = 1 << 8,
-  B9  = 1 << 9,
-  B12 = 1 << 12,
-  B16 = 1 << 16,
-  B18 = 1 << 18,
-  B19 = 1 << 19,
-  B20 = 1 << 20,
-  B21 = 1 << 21,
-  B22 = 1 << 22,
-  B23 = 1 << 23,
-  B24 = 1 << 24,
-  B25 = 1 << 25,
-  B26 = 1 << 26,
-  B27 = 1 << 27,
-
-  // Instruction bit masks.
-  RdMask     = 15 << 12,  // in str instruction
-  CondMask   = 15 << 28,
-  CoprocessorMask = 15 << 8,
-  OpCodeMask = 15 << 21,  // in data-processing instructions
-  Imm24Mask  = (1 << 24) - 1,
-  Off12Mask  = (1 << 12) - 1,
-  // Reserved condition.
-  nv = 15 << 28
-};
-
-
-// add(sp, sp, 4) instruction (aka Pop())
-static const Instr kPopInstruction =
-    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-static const Instr kPushRegPattern =
-    al | B26 | 4 | NegPreIndex | sp.code() * B16;
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-static const Instr kPopRegPattern =
-    al | B26 | L | 4 | PostIndex | sp.code() * B16;
-// mov lr, pc
-const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
-// ldr pc, [pc, #XXX]
-const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
-
-// Spare buffer.
-static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size) {
-  if (buffer == NULL) {
-    // Do our own buffer management.
-    if (buffer_size <= kMinimalBufferSize) {
-      buffer_size = kMinimalBufferSize;
-
-      if (spare_buffer_ != NULL) {
-        buffer = spare_buffer_;
-        spare_buffer_ = NULL;
-      }
-    }
-    if (buffer == NULL) {
-      buffer_ = NewArray<byte>(buffer_size);
-    } else {
-      buffer_ = static_cast<byte*>(buffer);
-    }
-    buffer_size_ = buffer_size;
-    own_buffer_ = true;
-
-  } else {
-    // Use externally provided buffer instead.
-    ASSERT(buffer_size > 0);
-    buffer_ = static_cast<byte*>(buffer);
-    buffer_size_ = buffer_size;
-    own_buffer_ = false;
-  }
-
-  // Setup buffer pointers.
-  ASSERT(buffer_ != NULL);
-  pc_ = buffer_;
-  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
-  num_prinfo_ = 0;
-  next_buffer_check_ = 0;
-  no_const_pool_before_ = 0;
-  last_const_pool_end_ = 0;
-  last_bound_pos_ = 0;
-  current_statement_position_ = RelocInfo::kNoPosition;
-  current_position_ = RelocInfo::kNoPosition;
-  written_statement_position_ = current_statement_position_;
-  written_position_ = current_position_;
-}
-
-
-Assembler::~Assembler() {
-  if (own_buffer_) {
-    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
-      spare_buffer_ = buffer_;
-    } else {
-      DeleteArray(buffer_);
-    }
-  }
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
-  // Emit constant pool if necessary.
-  CheckConstPool(true, false);
-  ASSERT(num_prinfo_ == 0);
-
-  // Setup code descriptor.
-  desc->buffer = buffer_;
-  desc->buffer_size = buffer_size_;
-  desc->instr_size = pc_offset();
-  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-}
-
-
-void Assembler::Align(int m) {
-  ASSERT(m >= 4 && IsPowerOf2(m));
-  while ((pc_offset() & (m - 1)) != 0) {
-    nop();
-  }
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the last
-// instruction using the label.
-
-
-// The link chain is terminated by a negative code position (must be aligned)
-const int kEndOfChain = -4;
-
-
-int Assembler::target_at(int pos)  {
-  Instr instr = instr_at(pos);
-  if ((instr & ~Imm24Mask) == 0) {
-    // Emitted label constant, not part of a branch.
-    return instr - (Code::kHeaderSize - kHeapObjectTag);
-  }
-  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
-  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
-  if ((instr & CondMask) == nv && (instr & B24) != 0)
-    // blx uses bit 24 to encode bit 2 of imm26
-    imm26 += 2;
-
-  return pos + kPcLoadDelta + imm26;
-}
-
-
-void Assembler::target_at_put(int pos, int target_pos) {
-  Instr instr = instr_at(pos);
-  if ((instr & ~Imm24Mask) == 0) {
-    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
-    // Emitted label constant, not part of a branch.
-    // Make label relative to Code* of generated Code object.
-    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
-    return;
-  }
-  int imm26 = target_pos - (pos + kPcLoadDelta);
-  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
-  if ((instr & CondMask) == nv) {
-    // blx uses bit 24 to encode bit 2 of imm26
-    ASSERT((imm26 & 1) == 0);
-    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
-  } else {
-    ASSERT((imm26 & 3) == 0);
-    instr &= ~Imm24Mask;
-  }
-  int imm24 = imm26 >> 2;
-  ASSERT(is_int24(imm24));
-  instr_at_put(pos, instr | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::print(Label* L) {
-  if (L->is_unused()) {
-    PrintF("unused label\n");
-  } else if (L->is_bound()) {
-    PrintF("bound label to %d\n", L->pos());
-  } else if (L->is_linked()) {
-    Label l = *L;
-    PrintF("unbound label");
-    while (l.is_linked()) {
-      PrintF("@ %d ", l.pos());
-      Instr instr = instr_at(l.pos());
-      if ((instr & ~Imm24Mask) == 0) {
-        PrintF("value\n");
-      } else {
-        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
-        int cond = instr & CondMask;
-        const char* b;
-        const char* c;
-        if (cond == nv) {
-          b = "blx";
-          c = "";
-        } else {
-          if ((instr & B24) != 0)
-            b = "bl";
-          else
-            b = "b";
-
-          switch (cond) {
-            case eq: c = "eq"; break;
-            case ne: c = "ne"; break;
-            case hs: c = "hs"; break;
-            case lo: c = "lo"; break;
-            case mi: c = "mi"; break;
-            case pl: c = "pl"; break;
-            case vs: c = "vs"; break;
-            case vc: c = "vc"; break;
-            case hi: c = "hi"; break;
-            case ls: c = "ls"; break;
-            case ge: c = "ge"; break;
-            case lt: c = "lt"; break;
-            case gt: c = "gt"; break;
-            case le: c = "le"; break;
-            case al: c = ""; break;
-            default:
-              c = "";
-              UNREACHABLE();
-          }
-        }
-        PrintF("%s%s\n", b, c);
-      }
-      next(&l);
-    }
-  } else {
-    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
-  }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
-  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
-  while (L->is_linked()) {
-    int fixup_pos = L->pos();
-    next(L);  // call next before overwriting link with target at fixup_pos
-    target_at_put(fixup_pos, pos);
-  }
-  L->bind_to(pos);
-
-  // Keep track of the last bound label so we don't eliminate any instructions
-  // before a bound label.
-  if (pos > last_bound_pos_)
-    last_bound_pos_ = pos;
-}
-
-
-void Assembler::link_to(Label* L, Label* appendix) {
-  if (appendix->is_linked()) {
-    if (L->is_linked()) {
-      // Append appendix to L's list.
-      int fixup_pos;
-      int link = L->pos();
-      do {
-        fixup_pos = link;
-        link = target_at(fixup_pos);
-      } while (link > 0);
-      ASSERT(link == kEndOfChain);
-      target_at_put(fixup_pos, appendix->pos());
-    } else {
-      // L is empty, simply use appendix.
-      *L = *appendix;
-    }
-  }
-  appendix->Unuse();  // appendix should not be used anymore
-}
-
-
-void Assembler::bind(Label* L) {
-  ASSERT(!L->is_bound());  // label can only be bound once
-  bind_to(L, pc_offset());
-}
-
-
-void Assembler::next(Label* L) {
-  ASSERT(L->is_linked());
-  int link = target_at(L->pos());
-  if (link > 0) {
-    L->link_to(link);
-  } else {
-    ASSERT(link == kEndOfChain);
-    L->Unuse();
-  }
-}
-
-
-// Low-level code emission routines depending on the addressing mode.
-static bool fits_shifter(uint32_t imm32,
-                         uint32_t* rotate_imm,
-                         uint32_t* immed_8,
-                         Instr* instr) {
-  // imm32 must be unsigned.
-  for (int rot = 0; rot < 16; rot++) {
-    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
-    if ((imm8 <= 0xff)) {
-      *rotate_imm = rot;
-      *immed_8 = imm8;
-      return true;
-    }
-  }
-  // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
-  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
-    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
-      *instr ^= 0x2*B21;
-      return true;
-    }
-  }
-  return false;
-}
-
-
-// We have to use the temporary register for things that can be relocated even
-// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
-// space.  There is no guarantee that the relocated location can be similarly
-// encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
-  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-    if (!Serializer::enabled()) {
-      Serializer::TooLateToEnableNow();
-    }
-#endif
-    return Serializer::enabled();
-  } else if (rmode == RelocInfo::NONE) {
-    return false;
-  }
-  return true;
-}
-
-
-void Assembler::addrmod1(Instr instr,
-                         Register rn,
-                         Register rd,
-                         const Operand& x) {
-  CheckBuffer();
-  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
-  if (!x.rm_.is_valid()) {
-    // Immediate.
-    uint32_t rotate_imm;
-    uint32_t immed_8;
-    if (MustUseIp(x.rmode_) ||
-        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
-      // The immediate operand cannot be encoded as a shifter operand, so load
-      // it first to register ip and change the original instruction to use ip.
-      // However, if the original instruction is a 'mov rd, x' (not setting the
-      // condition code), then replace it with a 'ldr rd, [pc]'.
-      RecordRelocInfo(x.rmode_, x.imm32_);
-      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
-      Condition cond = static_cast<Condition>(instr & CondMask);
-      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
-        ldr(rd, MemOperand(pc, 0), cond);
-      } else {
-        ldr(ip, MemOperand(pc, 0), cond);
-        addrmod1(instr, rn, rd, Operand(ip));
-      }
-      return;
-    }
-    instr |= I | rotate_imm*B8 | immed_8;
-  } else if (!x.rs_.is_valid()) {
-    // Immediate shift.
-    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
-  } else {
-    // Register shift.
-    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
-    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
-  }
-  emit(instr | rn.code()*B16 | rd.code()*B12);
-  if (rn.is(pc) || x.rm_.is(pc))
-    // Block constant pool emission for one instruction after reading pc.
-    BlockConstPoolBefore(pc_offset() + kInstrSize);
-}
-
-
-void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(CondMask | B | L)) == B26);
-  int am = x.am_;
-  if (!x.rm_.is_valid()) {
-    // Immediate offset.
-    int offset_12 = x.offset_;
-    if (offset_12 < 0) {
-      offset_12 = -offset_12;
-      am ^= U;
-    }
-    if (!is_uint12(offset_12)) {
-      // Immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed.
-      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-      mov(ip, Operand(x.offset_), LeaveCC,
-          static_cast<Condition>(instr & CondMask));
-      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
-      return;
-    }
-    ASSERT(offset_12 >= 0);  // no masking needed
-    instr |= offset_12;
-  } else {
-    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
-    // register offset the constructors make sure than both shift_imm_
-    // and shift_op_ are initialized.
-    ASSERT(!x.rm_.is(pc));
-    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
-  }
-  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
-  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
-  ASSERT(x.rn_.is_valid());
-  int am = x.am_;
-  if (!x.rm_.is_valid()) {
-    // Immediate offset.
-    int offset_8 = x.offset_;
-    if (offset_8 < 0) {
-      offset_8 = -offset_8;
-      am ^= U;
-    }
-    if (!is_uint8(offset_8)) {
-      // Immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed.
-      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-      mov(ip, Operand(x.offset_), LeaveCC,
-          static_cast<Condition>(instr & CondMask));
-      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
-      return;
-    }
-    ASSERT(offset_8 >= 0);  // no masking needed
-    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
-  } else if (x.shift_imm_ != 0) {
-    // Scaled register offset not supported, load index first
-    // rn (and rd in a load) should never be ip, or will be trashed.
-    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
-        static_cast<Condition>(instr & CondMask));
-    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
-    return;
-  } else {
-    // Register offset.
-    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
-    instr |= x.rm_.code();
-  }
-  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
-  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
-  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
-  ASSERT(rl != 0);
-  ASSERT(!rn.is(pc));
-  emit(instr | rn.code()*B16 | rl);
-}
-
-
-void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
-  // Unindexed addressing is not encoded by this function.
-  ASSERT_EQ((B27 | B26),
-            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
-  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
-  int am = x.am_;
-  int offset_8 = x.offset_;
-  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
-  offset_8 >>= 2;
-  if (offset_8 < 0) {
-    offset_8 = -offset_8;
-    am ^= U;
-  }
-  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
-  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
-
-  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
-  if ((am & P) == 0)
-    am |= W;
-
-  ASSERT(offset_8 >= 0);  // no masking needed
-  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
-}
-
-
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
-  int target_pos;
-  if (L->is_bound()) {
-    target_pos = L->pos();
-  } else {
-    if (L->is_linked()) {
-      target_pos = L->pos();  // L's link
-    } else {
-      target_pos = kEndOfChain;
-    }
-    L->link_to(pc_offset());
-  }
-
-  // Block the emission of the constant pool, since the branch instruction must
-  // be emitted at the pc offset recorded by the label.
-  BlockConstPoolBefore(pc_offset() + kInstrSize);
-  return target_pos - (pc_offset() + kPcLoadDelta);
-}
-
-
-void Assembler::label_at_put(Label* L, int at_offset) {
-  int target_pos;
-  if (L->is_bound()) {
-    target_pos = L->pos();
-  } else {
-    if (L->is_linked()) {
-      target_pos = L->pos();  // L's link
-    } else {
-      target_pos = kEndOfChain;
-    }
-    L->link_to(at_offset);
-    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
-  }
-}
-
-
-// Branch instructions.
-void Assembler::b(int branch_offset, Condition cond) {
-  ASSERT((branch_offset & 3) == 0);
-  int imm24 = branch_offset >> 2;
-  ASSERT(is_int24(imm24));
-  emit(cond | B27 | B25 | (imm24 & Imm24Mask));
-
-  if (cond == al)
-    // Dead code is a good location to emit the constant pool.
-    CheckConstPool(false, false);
-}
-
-
-void Assembler::bl(int branch_offset, Condition cond) {
-  ASSERT((branch_offset & 3) == 0);
-  int imm24 = branch_offset >> 2;
-  ASSERT(is_int24(imm24));
-  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::blx(int branch_offset) {  // v5 and above
-  WriteRecordedPositions();
-  ASSERT((branch_offset & 1) == 0);
-  int h = ((branch_offset & 2) >> 1)*B24;
-  int imm24 = branch_offset >> 2;
-  ASSERT(is_int24(imm24));
-  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::blx(Register target, Condition cond) {  // v5 and above
-  WriteRecordedPositions();
-  ASSERT(!target.is(pc));
-  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
-}
-
-
-void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
-  WriteRecordedPositions();
-  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
-  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
-}
-
-
-// Data-processing instructions.
-
-// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
-// Instruction details available in ARM DDI 0406A, A8-464.
-// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
-//  Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
-void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
-                     const Operand& src3, Condition cond) {
-  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
-  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
-  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
-  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
-       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
-}
-
-
-void Assembler::and_(Register dst, Register src1, const Operand& src2,
-                     SBit s, Condition cond) {
-  addrmod1(cond | 0*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::eor(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 1*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::sub(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 2*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::rsb(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 3*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::add(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 4*B21 | s, src1, dst, src2);
-
-  // Eliminate pattern: push(r), pop()
-  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
-  //   add(sp, sp, Operand(kPointerSize));
-  // Both instructions can be eliminated.
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-      last_bound_pos_ <= (pc_offset() - pattern_size) &&
-      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
-    pc_ -= 2 * kInstrSize;
-    if (FLAG_print_push_pop_elimination) {
-      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
-    }
-  }
-}
-
-
-void Assembler::adc(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 5*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::sbc(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 6*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::rsc(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 7*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 8*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 9*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 10*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 11*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::orr(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 12*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
-  if (dst.is(pc)) {
-    WriteRecordedPositions();
-  }
-  addrmod1(cond | 13*B21 | s, r0, dst, src);
-}
-
-
-void Assembler::bic(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 14*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
-  addrmod1(cond | 15*B21 | s, r0, dst, src);
-}
-
-
-// Multiply instructions.
-void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
-                    SBit s, Condition cond) {
-  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
-  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::mul(Register dst, Register src1, Register src2,
-                    SBit s, Condition cond) {
-  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
-  // dst goes in bits 16-19 for this instruction!
-  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smlal(Register dstL,
-                      Register dstH,
-                      Register src1,
-                      Register src2,
-                      SBit s,
-                      Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
-  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smull(Register dstL,
-                      Register dstH,
-                      Register src1,
-                      Register src2,
-                      SBit s,
-                      Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
-  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umlal(Register dstL,
-                      Register dstH,
-                      Register src1,
-                      Register src2,
-                      SBit s,
-                      Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
-  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umull(Register dstL,
-                      Register dstH,
-                      Register src1,
-                      Register src2,
-                      SBit s,
-                      Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
-  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-// Miscellaneous arithmetic instructions.
-void Assembler::clz(Register dst, Register src, Condition cond) {
-  // v5 and above.
-  ASSERT(!dst.is(pc) && !src.is(pc));
-  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
-       15*B8 | B4 | src.code());
-}
-
-
-// Status register access instructions.
-void Assembler::mrs(Register dst, SRegister s, Condition cond) {
-  ASSERT(!dst.is(pc));
-  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
-}
-
-
-void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
-                    Condition cond) {
-  ASSERT(fields >= B16 && fields < B20);  // at least one field set
-  Instr instr;
-  if (!src.rm_.is_valid()) {
-    // Immediate.
-    uint32_t rotate_imm;
-    uint32_t immed_8;
-    if (MustUseIp(src.rmode_) ||
-        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
-      // Immediate operand cannot be encoded, load it first to register ip.
-      RecordRelocInfo(src.rmode_, src.imm32_);
-      ldr(ip, MemOperand(pc, 0), cond);
-      msr(fields, Operand(ip), cond);
-      return;
-    }
-    instr = I | rotate_imm*B8 | immed_8;
-  } else {
-    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
-    instr = src.rm_.code();
-  }
-  emit(cond | instr | B24 | B21 | fields | 15*B12);
-}
-
-
-// Load/Store instructions.
-void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
-  if (dst.is(pc)) {
-    WriteRecordedPositions();
-  }
-  addrmod2(cond | B26 | L, dst, src);
-
-  // Eliminate pattern: push(r), pop(r)
-  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
-  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
-  // Both instructions can be eliminated.
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-      last_bound_pos_ <= (pc_offset() - pattern_size) &&
-      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
-      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
-    pc_ -= 2 * kInstrSize;
-    if (FLAG_print_push_pop_elimination) {
-      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
-    }
-  }
-}
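-
-
-// Illustration (hedged sketch): the ldr() peephole above drops a
-// same-register push/pop pair, e.g.
-//
-//   __ str(r1, MemOperand(sp, 4, NegPreIndex));  // push(r1)
-//   __ ldr(r1, MemOperand(sp, 4, PostIndex));    // pop(r1)
-//
-// Both instructions match kPushRegPattern/kPopRegPattern with the same Rd,
-// so pc_ is rewound and neither instruction reaches the final code.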
-
-
-void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
-  addrmod2(cond | B26, src, dst);
-
-  // Eliminate pattern: pop(), push(r)
-  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
-  // ->  str r, [sp, 0], al
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-     last_bound_pos_ <= (pc_offset() - pattern_size) &&
-     reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-     // Pattern.
-     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
-     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
-    pc_ -= 2 * kInstrSize;
-    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
-    if (FLAG_print_push_pop_elimination) {
-      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
-    }
-  }
-}
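-
-
-// Illustration (hedged sketch): the str() rewrite above folds a pop()
-// followed by a push(r) into one store to the current stack slot, e.g.
-//
-//   __ add(sp, sp, Operand(kPointerSize));       // pop()
-//   __ str(r2, MemOperand(sp, 4, NegPreIndex));  // push(r2)
-//
-// is replaced by the single instruction  str r2, [sp, #0].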
-
-
-void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
-  addrmod2(cond | B26 | B | L, dst, src);
-}
-
-
-void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
-  addrmod2(cond | B26 | B, src, dst);
-}
-
-
-void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
-  addrmod3(cond | L | B7 | H | B4, dst, src);
-}
-
-
-void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
-  addrmod3(cond | B7 | H | B4, src, dst);
-}
-
-
-void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
-  addrmod3(cond | L | B7 | S6 | B4, dst, src);
-}
-
-
-void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
-  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
-}
-
-
-// Load/Store multiple instructions.
-void Assembler::ldm(BlockAddrMode am,
-                    Register base,
-                    RegList dst,
-                    Condition cond) {
-  // ABI stack constraint: ldmxx base, {..sp..} with base != sp is not
-  // restartable.
-  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
-
-  addrmod4(cond | B27 | am | L, base, dst);
-
-  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
-  if (cond == al && (dst & pc.bit()) != 0) {
-    // There is a slight chance that the ldm instruction was actually a call,
-    // in which case it would be wrong to return into the constant pool; we
-    // recognize this case by checking if the emission of the pool was blocked
-    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
-    // the case, we emit a jump over the pool.
-    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
-  }
-}
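-
-
-// For example (hedged sketch), an epilogue along the lines of
-//
-//   __ ldm(ia_w, sp, fp.bit() | pc.bit());  // restore fp and return
-//
-// loads pc, so the unreachable code after it is a safe place for the
-// constant pool -- unless a preceding mov lr, pc turned the ldm into a
-// call, in which case the jump over the pool is emitted as described
-// above.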
-
-
-void Assembler::stm(BlockAddrMode am,
-                    Register base,
-                    RegList src,
-                    Condition cond) {
-  addrmod4(cond | B27 | am, base, src);
-}
-
-
-// Semaphore instructions.
-void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
-  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
-  ASSERT(!dst.is(base) && !src.is(base));
-  emit(cond | P | base.code()*B16 | dst.code()*B12 |
-       B7 | B4 | src.code());
-}
-
-
-void Assembler::swpb(Register dst,
-                     Register src,
-                     Register base,
-                     Condition cond) {
-  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
-  ASSERT(!dst.is(base) && !src.is(base));
-  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
-       B7 | B4 | src.code());
-}
-
-
-// Exception-generating instructions and debugging support.
-void Assembler::stop(const char* msg) {
-#if !defined(__arm__)
-  // The simulator handles these special instructions and stops execution.
-  emit(15 << 28 | ((intptr_t) msg));
-#else
-  // Just issue a simple break instruction for now. Alternatively we could use
-  // the swi(0x9f0001) instruction on Linux.
-  bkpt(0);
-#endif
-}
-
-
-void Assembler::bkpt(uint32_t imm16) {  // v5 and above
-  ASSERT(is_uint16(imm16));
-  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
-}
-
-
-void Assembler::swi(uint32_t imm24, Condition cond) {
-  ASSERT(is_uint24(imm24));
-  emit(cond | 15*B24 | imm24);
-}
-
-
-// Coprocessor instructions.
-void Assembler::cdp(Coprocessor coproc,
-                    int opcode_1,
-                    CRegister crd,
-                    CRegister crn,
-                    CRegister crm,
-                    int opcode_2,
-                    Condition cond) {
-  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
-  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
-       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
-}
-
-
-void Assembler::cdp2(Coprocessor coproc,
-                     int opcode_1,
-                     CRegister crd,
-                     CRegister crn,
-                     CRegister crm,
-                     int opcode_2) {  // v5 and above
-  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::mcr(Coprocessor coproc,
-                    int opcode_1,
-                    Register rd,
-                    CRegister crn,
-                    CRegister crm,
-                    int opcode_2,
-                    Condition cond) {
-  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
-  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
-       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mcr2(Coprocessor coproc,
-                     int opcode_1,
-                     Register rd,
-                     CRegister crn,
-                     CRegister crm,
-                     int opcode_2) {  // v5 and above
-  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::mrc(Coprocessor coproc,
-                    int opcode_1,
-                    Register rd,
-                    CRegister crn,
-                    CRegister crm,
-                    int opcode_2,
-                    Condition cond) {
-  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
-  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
-       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mrc2(Coprocessor coproc,
-                     int opcode_1,
-                     Register rd,
-                     CRegister crn,
-                     CRegister crm,
-                     int opcode_2) {  // v5 and above
-  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
-                    CRegister crd,
-                    const MemOperand& src,
-                    LFlag l,
-                    Condition cond) {
-  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
-                    CRegister crd,
-                    Register rn,
-                    int option,
-                    LFlag l,
-                    Condition cond) {
-  // Unindexed addressing.
-  ASSERT(is_uint8(option));
-  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
-       coproc*B8 | (option & 255));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
-                     CRegister crd,
-                     const MemOperand& src,
-                     LFlag l) {  // v5 and above
-  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
-                     CRegister crd,
-                     Register rn,
-                     int option,
-                     LFlag l) {  // v5 and above
-  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::stc(Coprocessor coproc,
-                    CRegister crd,
-                    const MemOperand& dst,
-                    LFlag l,
-                    Condition cond) {
-  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
-                    CRegister crd,
-                    Register rn,
-                    int option,
-                    LFlag l,
-                    Condition cond) {
-  // Unindexed addressing.
-  ASSERT(is_uint8(option));
-  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
-       coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
-                     CRegister crd,
-                     const MemOperand& dst,
-                     LFlag l) {  // v5 and above
-  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
-                     CRegister crd,
-                     Register rn,
-                     int option,
-                     LFlag l) {  // v5 and above
-  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
-}
-
-
-// Support for VFP.
-void Assembler::vldr(const DwVfpRegister dst,
-                     const Register base,
-                     int offset,
-                     const Condition cond) {
-  // Ddst = MEM(Rbase + offset).
-  // Instruction details available in ARM DDI 0406A, A8-628.
-  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
-  // Vdst(15-12) | 1011(11-8) | offset
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(offset % 4 == 0);
-  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
-       0xB*B8 | ((offset / 4) & 255));
-}
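-
-
-// Encoding note with a hedged example: the immediate is a word-aligned
-// byte offset stored as offset / 4, so
-//
-//   __ vldr(d0, r1, 8);  // loads d0 from [r1 + 8], offset field = 2
-//
-// and the ASSERT above rejects offsets that are not multiples of 4.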
-
-
-void Assembler::vstr(const DwVfpRegister src,
-                     const Register base,
-                     int offset,
-                     const Condition cond) {
-  // MEM(Rbase + offset) = Dsrc.
-  // Instruction details available in ARM DDI 0406A, A8-786.
-  // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
-  // Vsrc(15-12) | 1011(11-8) | (offset/4)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(offset % 4 == 0);
-  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
-       0xB*B8 | ((offset / 4) & 255));
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
-                     const Register src1,
-                     const Register src2,
-                     const Condition cond) {
-  // Dm = <Rt,Rt2>.
-  // Instruction details available in ARM DDI 0406A, A8-646.
-  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
-  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(!src1.is(pc) && !src2.is(pc));
-  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
-       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
-}
-
-
-void Assembler::vmov(const Register dst1,
-                     const Register dst2,
-                     const DwVfpRegister src,
-                     const Condition cond) {
-  // <Rt,Rt2> = Dm.
-  // Instruction details available in ARM DDI 0406A, A8-646.
-  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
-  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(!dst1.is(pc) && !dst2.is(pc));
-  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
-       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
-                     const Register src,
-                     const Condition cond) {
-  // Sn = Rt.
-  // Instruction details available in ARM DDI 0406A, A8-642.
-  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
-  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(!src.is(pc));
-  emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
-       src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
-}
-
-
-void Assembler::vmov(const Register dst,
-                     const SwVfpRegister src,
-                     const Condition cond) {
-  // Rt = Sn.
-  // Instruction details available in ARM DDI 0406A, A8-642.
-  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
-  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(!dst.is(pc));
-  emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
-       dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
-}
-
-
-void Assembler::vcvt(const DwVfpRegister dst,
-                     const SwVfpRegister src,
-                     const Condition cond) {
-  // Dd = Sm (integer in Sm converted to an IEEE 64-bit double in Dd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
-       dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
-       (0x1 & src.code())*B5 | (src.code() >> 1));
-}
-
-
-void Assembler::vcvt(const SwVfpRegister dst,
-                     const DwVfpRegister src,
-                     const Condition cond) {
-  // Sd = Dm (IEEE 64-bit double in Dm converted to a 32-bit integer in Sd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | (0x1 & dst.code())*B22 |
-       0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
-       0x5*B9 | B8 | B7 | B6 | src.code());
-}
-
-
-void Assembler::vadd(const DwVfpRegister dst,
-                     const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const Condition cond) {
-  // Dd = vadd(Dn, Dm) double precision floating point addition.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
-  // Instruction details available in ARM DDI 0406A, A8-536.
-  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
-       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vsub(const DwVfpRegister dst,
-                     const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const Condition cond) {
-  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
-  // Instruction details available in ARM DDI 0406A, A8-784.
-  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
-       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vmul(const DwVfpRegister dst,
-                     const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const Condition cond) {
-  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
-  // Instruction details available in ARM DDI 0406A, A8-784.
-  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
-       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vdiv(const DwVfpRegister dst,
-                     const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const Condition cond) {
-  // Dd = vdiv(Dn, Dm) double precision floating point division.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
-  // Instruction details available in ARM DDI 0406A, A8-584.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
-       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const SBit s,
-                     const Condition cond) {
-  // vcmp(Dd, Dm) double precision floating point comparison.
-  // Instruction details available in ARM DDI 0406A, A8-570.
-  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
-       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vmrs(Register dst, Condition cond) {
-  // Instruction details available in ARM DDI 0406A, A8-652.
-  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
-  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
-       dst.code()*B12 | 0xA*B8 | B4);
-}
-
-
-// Pseudo instructions.
-void Assembler::lea(Register dst,
-                    const MemOperand& x,
-                    SBit s,
-                    Condition cond) {
-  int am = x.am_;
-  if (!x.rm_.is_valid()) {
-    // Immediate offset.
-    if ((am & P) == 0)  // post indexing
-      mov(dst, Operand(x.rn_), s, cond);
-    else if ((am & U) == 0)  // negative indexing
-      sub(dst, x.rn_, Operand(x.offset_), s, cond);
-    else
-      add(dst, x.rn_, Operand(x.offset_), s, cond);
-  } else {
-    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
-    // register offset; either way the constructors make sure that both
-    // shift_imm_ and shift_op_ are initialized.
-    ASSERT(!x.rm_.is(pc));
-    if ((am & P) == 0)  // post indexing
-      mov(dst, Operand(x.rn_), s, cond);
-    else if ((am & U) == 0)  // negative indexing
-      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
-    else
-      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
-  }
-}
-
-
-bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
-  uint32_t dummy1;
-  uint32_t dummy2;
-  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
-}
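-
-
-// Worked examples (illustrative only): an addrmod1 immediate must be an
-// 8-bit value rotated right by an even amount, so
-//
-//   ImmediateFitsAddrMode1Instruction(0x000000ff);  // true, 0xff ror 0
-//   ImmediateFitsAddrMode1Instruction(0xff000000);  // true, 0xff ror 8
-//   ImmediateFitsAddrMode1Instruction(0x00000101);  // false, needs 9 bits
-//
-// Operands that do not fit are loaded into ip via the constant pool.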
-
-
-void Assembler::BlockConstPoolFor(int instructions) {
-  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
-// Debugging.
-void Assembler::RecordJSReturn() {
-  WriteRecordedPositions();
-  CheckBuffer();
-  RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
-    CheckBuffer();
-    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
-  }
-}
-
-
-void Assembler::RecordPosition(int pos) {
-  if (pos == RelocInfo::kNoPosition) return;
-  ASSERT(pos >= 0);
-  current_position_ = pos;
-}
-
-
-void Assembler::RecordStatementPosition(int pos) {
-  if (pos == RelocInfo::kNoPosition) return;
-  ASSERT(pos >= 0);
-  current_statement_position_ = pos;
-}
-
-
-void Assembler::WriteRecordedPositions() {
-  // Write the statement position if it is different from what was written last
-  // time.
-  if (current_statement_position_ != written_statement_position_) {
-    CheckBuffer();
-    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
-    written_statement_position_ = current_statement_position_;
-  }
-
-  // Write the position if it is different from what was written last time and
-  // also different from the written statement position.
-  if (current_position_ != written_position_ &&
-      current_position_ != written_statement_position_) {
-    CheckBuffer();
-    RecordRelocInfo(RelocInfo::POSITION, current_position_);
-    written_position_ = current_position_;
-  }
-}
-
-
-void Assembler::GrowBuffer() {
-  if (!own_buffer_) FATAL("external code buffer is too small");
-
-  // Compute new buffer size.
-  CodeDesc desc;  // the new buffer
-  if (buffer_size_ < 4*KB) {
-    desc.buffer_size = 4*KB;
-  } else if (buffer_size_ < 1*MB) {
-    desc.buffer_size = 2*buffer_size_;
-  } else {
-    desc.buffer_size = buffer_size_ + 1*MB;
-  }
-  CHECK_GT(desc.buffer_size, 0);  // no overflow
-
-  // Setup new buffer.
-  desc.buffer = NewArray<byte>(desc.buffer_size);
-
-  desc.instr_size = pc_offset();
-  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-
-  // Copy the data.
-  int pc_delta = desc.buffer - buffer_;
-  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
-  memmove(desc.buffer, buffer_, desc.instr_size);
-  memmove(reloc_info_writer.pos() + rc_delta,
-          reloc_info_writer.pos(), desc.reloc_size);
-
-  // Switch buffers.
-  DeleteArray(buffer_);
-  buffer_ = desc.buffer;
-  buffer_size_ = desc.buffer_size;
-  pc_ += pc_delta;
-  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
-                               reloc_info_writer.last_pc() + pc_delta);
-
-  // None of our relocation types are pc-relative pointing outside the code
-  // buffer nor pc-absolute pointing inside the code buffer, so there is no
-  // need to relocate any emitted relocation entries.
-
-  // Relocate pending relocation entries.
-  for (int i = 0; i < num_prinfo_; i++) {
-    RelocInfo& rinfo = prinfo_[i];
-    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
-           rinfo.rmode() != RelocInfo::POSITION);
-    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
-      rinfo.set_pc(rinfo.pc() + pc_delta);
-    }
-  }
-}
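-
-
-// Growth policy illustration (hedged): from the 4KB minimum the buffer
-// doubles until it reaches 1MB and then grows linearly in 1MB steps:
-// 4KB, 8KB, ..., 512KB, 1MB, 2MB, 3MB, ...  Doubling keeps the number of
-// copies logarithmic for small code; linear steps bound waste for large
-// code.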
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
-  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
-    // Adjust code for new modes.
-    ASSERT(RelocInfo::IsJSReturn(rmode)
-           || RelocInfo::IsComment(rmode)
-           || RelocInfo::IsPosition(rmode));
-    // These modes do not need an entry in the constant pool.
-  } else {
-    ASSERT(num_prinfo_ < kMaxNumPRInfo);
-    prinfo_[num_prinfo_++] = rinfo;
-    // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info.
-    BlockConstPoolBefore(pc_offset() + kInstrSize);
-  }
-  if (rinfo.rmode() != RelocInfo::NONE) {
-    // Don't record external references unless the heap will be serialized.
-    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-      if (!Serializer::enabled()) {
-        Serializer::TooLateToEnableNow();
-      }
-#endif
-      if (!Serializer::enabled() && !FLAG_debug_code) {
-        return;
-      }
-    }
-    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
-    reloc_info_writer.Write(&rinfo);
-  }
-}
-
-
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
-  // Calculate the offset of the next check. It will be overwritten
-  // when a const pool is generated or when const pools are being
-  // blocked for a specific range.
-  next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
-  // There is nothing to do if there are no pending relocation info entries.
-  if (num_prinfo_ == 0) return;
-
-  // We emit a constant pool at regular intervals of about kDistBetweenPools
-  // or when requested by parameter force_emit (e.g. after each function).
-  // We prefer not to emit a jump unless the max distance is reached or if we
-  // are running low on slots, which can happen if a lot of constants are being
-  // emitted (e.g. --debug-code and many static references).
-  int dist = pc_offset() - last_const_pool_end_;
-  if (!force_emit && dist < kMaxDistBetweenPools &&
-      (require_jump || dist < kDistBetweenPools) &&
-      // TODO(1236125): Cleanup the "magic" number below. We know that
-      // the code generation will test every kCheckConstIntervalInst.
-      // Thus we are safe as long as we generate less than 7 constant
-      // entries per instruction.
-      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
-    return;
-  }
-
-  // If we did not return by now, we need to emit the constant pool soon.
-
-  // However, some small sequences of instructions must not be broken up by the
-  // insertion of a constant pool; such sequences are protected by setting
-  // no_const_pool_before_, which is checked here. Also, recursive calls to
-  // CheckConstPool are blocked by no_const_pool_before_.
-  if (pc_offset() < no_const_pool_before_) {
-    // Emission is currently blocked; make sure we try again as soon as
-    // possible.
-    next_buffer_check_ = no_const_pool_before_;
-
-    // Something is wrong if emission is forced and blocked at the same time.
-    ASSERT(!force_emit);
-    return;
-  }
-
-  int jump_instr = require_jump ? kInstrSize : 0;
-
-  // Check that the code buffer is large enough before emitting the constant
-  // pool and relocation information (include the jump over the pool and the
-  // constant pool marker).
-  int max_needed_space =
-      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
-  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
-
-  // Block recursive calls to CheckConstPool.
-  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
-                       num_prinfo_*kInstrSize);
-  // Don't bother to check for the emit calls below.
-  next_buffer_check_ = no_const_pool_before_;
-
-  // Emit jump over constant pool if necessary.
-  Label after_pool;
-  if (require_jump) b(&after_pool);
-
-  RecordComment("[ Constant Pool");
-
-  // Put down constant pool marker "Undefined instruction" as specified by
-  // A3.1 Instruction set encoding.
-  emit(0x03000000 | num_prinfo_);
-
-  // Emit constant pool entries.
-  for (int i = 0; i < num_prinfo_; i++) {
-    RelocInfo& rinfo = prinfo_[i];
-    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
-           rinfo.rmode() != RelocInfo::POSITION &&
-           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
-    Instr instr = instr_at(rinfo.pc());
-
-    // Instruction to patch must be a ldr/str [pc, #offset].
-    // P and U set, B and W clear, Rn == pc, offset12 still 0.
-    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
-           (2*B25 | P | U | pc.code()*B16));
-    int delta = pc_ - rinfo.pc() - 8;
-    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
-    if (delta < 0) {
-      instr &= ~U;
-      delta = -delta;
-    }
-    ASSERT(is_uint12(delta));
-    instr_at_put(rinfo.pc(), instr + delta);
-    emit(rinfo.data());
-  }
-  num_prinfo_ = 0;
-  last_const_pool_end_ = pc_offset();
-
-  RecordComment("]");
-
-  if (after_pool.is_linked()) {
-    bind(&after_pool);
-  }
-
-  // Since a constant pool was just emitted, move the check offset forward by
-  // the standard interval.
-  next_buffer_check_ = pc_offset() + kCheckConstInterval;
-}
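-
-
-// Patch-offset example (hedged sketch): a pending entry was emitted as
-//
-//   ldr rX, [pc, #0]
-//
-// and its pool slot ends up at pc_ when the pool is dumped. Since the pc
-// register reads two instructions (8 bytes) ahead on ARM, the patched
-// offset is
-//
-//   delta = pc_ - rinfo.pc() - kPcLoadDelta  // kPcLoadDelta == 8
-//
-// which is folded into the offset12 field of the original ldr.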
-
-
-} }  // namespace v8::internal
diff --git a/src/arm/assembler-thumb2.h b/src/arm/assembler-thumb2.h
deleted file mode 100644
index 2da1138..0000000
--- a/src/arm/assembler-thumb2.h
+++ /dev/null
@@ -1,1036 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-// A light-weight ARM Assembler
-// Generates user mode instructions for the ARM architecture up to version 5
-
-#ifndef V8_ARM_ASSEMBLER_THUMB2_H_
-#define V8_ARM_ASSEMBLER_THUMB2_H_
-#include <stdio.h>
-#include "assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-// Core register
-struct Register {
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(Register reg) const  { return code_ == reg.code_; }
-  int code() const  {
-    ASSERT(is_valid());
-    return code_;
-  }
-  int bit() const  {
-    ASSERT(is_valid());
-    return 1 << code_;
-  }
-
-  // Unfortunately we can't make this private in a struct.
-  int code_;
-};
-
-
-extern Register no_reg;
-extern Register r0;
-extern Register r1;
-extern Register r2;
-extern Register r3;
-extern Register r4;
-extern Register r5;
-extern Register r6;
-extern Register r7;
-extern Register r8;
-extern Register r9;
-extern Register r10;
-extern Register fp;
-extern Register ip;
-extern Register sp;
-extern Register lr;
-extern Register pc;
-
-
-// Single word VFP register.
-struct SwVfpRegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < 32; }
-  bool is(SwVfpRegister reg) const  { return code_ == reg.code_; }
-  int code() const  {
-    ASSERT(is_valid());
-    return code_;
-  }
-  int bit() const  {
-    ASSERT(is_valid());
-    return 1 << code_;
-  }
-
-  int code_;
-};
-
-
-// Double word VFP register.
-struct DwVfpRegister {
-  // Supporting d0 to d15, can be later extended to d31.
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(DwVfpRegister reg) const  { return code_ == reg.code_; }
-  int code() const  {
-    ASSERT(is_valid());
-    return code_;
-  }
-  int bit() const  {
-    ASSERT(is_valid());
-    return 1 << code_;
-  }
-
-  int code_;
-};
-
-
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-extern SwVfpRegister s0;
-extern SwVfpRegister s1;
-extern SwVfpRegister s2;
-extern SwVfpRegister s3;
-extern SwVfpRegister s4;
-extern SwVfpRegister s5;
-extern SwVfpRegister s6;
-extern SwVfpRegister s7;
-extern SwVfpRegister s8;
-extern SwVfpRegister s9;
-extern SwVfpRegister s10;
-extern SwVfpRegister s11;
-extern SwVfpRegister s12;
-extern SwVfpRegister s13;
-extern SwVfpRegister s14;
-extern SwVfpRegister s15;
-extern SwVfpRegister s16;
-extern SwVfpRegister s17;
-extern SwVfpRegister s18;
-extern SwVfpRegister s19;
-extern SwVfpRegister s20;
-extern SwVfpRegister s21;
-extern SwVfpRegister s22;
-extern SwVfpRegister s23;
-extern SwVfpRegister s24;
-extern SwVfpRegister s25;
-extern SwVfpRegister s26;
-extern SwVfpRegister s27;
-extern SwVfpRegister s28;
-extern SwVfpRegister s29;
-extern SwVfpRegister s30;
-extern SwVfpRegister s31;
-
-extern DwVfpRegister d0;
-extern DwVfpRegister d1;
-extern DwVfpRegister d2;
-extern DwVfpRegister d3;
-extern DwVfpRegister d4;
-extern DwVfpRegister d5;
-extern DwVfpRegister d6;
-extern DwVfpRegister d7;
-extern DwVfpRegister d8;
-extern DwVfpRegister d9;
-extern DwVfpRegister d10;
-extern DwVfpRegister d11;
-extern DwVfpRegister d12;
-extern DwVfpRegister d13;
-extern DwVfpRegister d14;
-extern DwVfpRegister d15;
-
-
-// Coprocessor register
-struct CRegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(CRegister creg) const  { return code_ == creg.code_; }
-  int code() const  {
-    ASSERT(is_valid());
-    return code_;
-  }
-  int bit() const  {
-    ASSERT(is_valid());
-    return 1 << code_;
-  }
-
-  // Unfortunately we can't make this private in a struct.
-  int code_;
-};
-
-
-extern CRegister no_creg;
-extern CRegister cr0;
-extern CRegister cr1;
-extern CRegister cr2;
-extern CRegister cr3;
-extern CRegister cr4;
-extern CRegister cr5;
-extern CRegister cr6;
-extern CRegister cr7;
-extern CRegister cr8;
-extern CRegister cr9;
-extern CRegister cr10;
-extern CRegister cr11;
-extern CRegister cr12;
-extern CRegister cr13;
-extern CRegister cr14;
-extern CRegister cr15;
-
-
-// Coprocessor number
-enum Coprocessor {
-  p0  = 0,
-  p1  = 1,
-  p2  = 2,
-  p3  = 3,
-  p4  = 4,
-  p5  = 5,
-  p6  = 6,
-  p7  = 7,
-  p8  = 8,
-  p9  = 9,
-  p10 = 10,
-  p11 = 11,
-  p12 = 12,
-  p13 = 13,
-  p14 = 14,
-  p15 = 15
-};
-
-
-// Condition field in instructions.
-enum Condition {
-  eq =  0 << 28,  // Z set            equal.
-  ne =  1 << 28,  // Z clear          not equal.
-  nz =  1 << 28,  // Z clear          not zero.
-  cs =  2 << 28,  // C set            carry set.
-  hs =  2 << 28,  // C set            unsigned higher or same.
-  cc =  3 << 28,  // C clear          carry clear.
-  lo =  3 << 28,  // C clear          unsigned lower.
-  mi =  4 << 28,  // N set            negative.
-  pl =  5 << 28,  // N clear          positive or zero.
-  vs =  6 << 28,  // V set            overflow.
-  vc =  7 << 28,  // V clear          no overflow.
-  hi =  8 << 28,  // C set, Z clear   unsigned higher.
-  ls =  9 << 28,  // C clear or Z set unsigned lower or same.
-  ge = 10 << 28,  // N == V           greater or equal.
-  lt = 11 << 28,  // N != V           less than.
-  gt = 12 << 28,  // Z clear, N == V  greater than.
-  le = 13 << 28,  // Z set or N != V  less than or equal.
-  al = 14 << 28   //                  always.
-};
-
-
-// Returns the equivalent of !cc.
-INLINE(Condition NegateCondition(Condition cc));
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
-  switch (cc) {
-    case lo:
-      return hi;
-    case hi:
-      return lo;
-    case hs:
-      return ls;
-    case ls:
-      return hs;
-    case lt:
-      return gt;
-    case gt:
-      return lt;
-    case ge:
-      return le;
-    case le:
-      return ge;
-    default:
-      return cc;
-  }
-}
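-
-
-// Example (illustrative): ReverseCondition is what makes transposing a
-// comparison legal, e.g.
-//
-//   cmp(r0, Operand(r1)); b(lt, &L);
-//
-// behaves like
-//
-//   cmp(r1, Operand(r0)); b(ReverseCondition(lt), &L);  // b(gt, &L)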
-
-
-// Branch hints are not used on the ARM.  They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the ARM.  Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants
-
-// Shifter operand shift operation
-enum ShiftOp {
-  LSL = 0 << 5,
-  LSR = 1 << 5,
-  ASR = 2 << 5,
-  ROR = 3 << 5,
-  RRX = -1
-};
-
-
-// Condition code updating mode
-enum SBit {
-  SetCC   = 1 << 20,  // set condition code
-  LeaveCC = 0 << 20   // leave condition code unchanged
-};
-
-
-// Status register selection
-enum SRegister {
-  CPSR = 0 << 22,
-  SPSR = 1 << 22
-};
-
-
-// Status register fields
-enum SRegisterField {
-  CPSR_c = CPSR | 1 << 16,
-  CPSR_x = CPSR | 1 << 17,
-  CPSR_s = CPSR | 1 << 18,
-  CPSR_f = CPSR | 1 << 19,
-  SPSR_c = SPSR | 1 << 16,
-  SPSR_x = SPSR | 1 << 17,
-  SPSR_s = SPSR | 1 << 18,
-  SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values)
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode
-enum AddrMode {
-  // bit encoding P U W
-  Offset       = (8|4|0) << 21,  // offset (without writeback to base)
-  PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
-  PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
-  NegOffset    = (8|0|0) << 21,  // negative offset (without writeback to base)
-  NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
-  NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
-};
-
-
-// Load/store multiple addressing mode
-enum BlockAddrMode {
-  // bit encoding P U W
-  da           = (0|0|0) << 21,  // decrement after
-  ia           = (0|4|0) << 21,  // increment after
-  db           = (8|0|0) << 21,  // decrement before
-  ib           = (8|4|0) << 21,  // increment before
-  da_w         = (0|0|1) << 21,  // decrement after with writeback to base
-  ia_w         = (0|4|1) << 21,  // increment after with writeback to base
-  db_w         = (8|0|1) << 21,  // decrement before with writeback to base
-  ib_w         = (8|4|1) << 21   // increment before with writeback to base
-};
-
-
-// Coprocessor load/store operand size
-enum LFlag {
-  Long  = 1 << 22,  // long load/store coprocessor
-  Short = 0 << 22   // short load/store coprocessor
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-// Class Operand represents a shifter operand in data processing instructions
-class Operand BASE_EMBEDDED {
- public:
-  // immediate
-  INLINE(explicit Operand(int32_t immediate,
-         RelocInfo::Mode rmode = RelocInfo::NONE));
-  INLINE(explicit Operand(const ExternalReference& f));
-  INLINE(explicit Operand(const char* s));
-  explicit Operand(Handle<Object> handle);
-  INLINE(explicit Operand(Smi* value));
-
-  // rm
-  INLINE(explicit Operand(Register rm));
-
-  // rm <shift_op> shift_imm
-  explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
-
-  // rm <shift_op> rs
-  explicit Operand(Register rm, ShiftOp shift_op, Register rs);
-
-  // Return true if this is a register operand.
-  INLINE(bool is_reg() const);
-
-  Register rm() const { return rm_; }
-
- private:
-  Register rm_;
-  Register rs_;
-  ShiftOp shift_op_;
-  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
-  int32_t imm32_;  // valid if rm_ == no_reg
-  RelocInfo::Mode rmode_;
-
-  friend class Assembler;
-};
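-
-
-// Illustrative shifter operands (hedged usage sketch):
-//
-//   Operand(42)              // immediate #42
-//   Operand(r1)              // register r1
-//   Operand(r1, LSL, 2)      // r1, LSL #2 (i.e. r1 * 4)
-//   Operand(r1, ASR, r2)     // r1, ASR r2 (register-specified shift)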
-
-
-// Class MemOperand represents a memory operand in load and store instructions
-class MemOperand BASE_EMBEDDED {
- public:
-  // [rn +/- offset]      Offset/NegOffset
-  // [rn +/- offset]!     PreIndex/NegPreIndex
-  // [rn], +/- offset     PostIndex/NegPostIndex
-  // offset is any signed 32-bit value; offset is first loaded to register ip if
-  // it does not fit the addressing mode (12-bit unsigned and sign bit)
-  explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
-
-  // [rn +/- rm]          Offset/NegOffset
-  // [rn +/- rm]!         PreIndex/NegPreIndex
-  // [rn], +/- rm         PostIndex/NegPostIndex
-  explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
-
-  // [rn +/- rm <shift_op> shift_imm]      Offset/NegOffset
-  // [rn +/- rm <shift_op> shift_imm]!     PreIndex/NegPreIndex
-  // [rn], +/- rm <shift_op> shift_imm     PostIndex/NegPostIndex
-  explicit MemOperand(Register rn, Register rm,
-                      ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
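-
-  // Illustrative addressing modes (hedged usage sketch):
-  //   MemOperand(r0, 4)                        // [r0 + 4]
-  //   MemOperand(sp, 4, NegPreIndex)           // [sp, #-4]!  (push slot)
-  //   MemOperand(sp, 4, PostIndex)             // [sp], #+4   (pop slot)
-  //   MemOperand(r0, r1, LSL, 2, Offset)       // [r0 + (r1 << 2)]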
-
- private:
-  Register rn_;  // base
-  Register rm_;  // register offset
-  int32_t offset_;  // valid if rm_ == no_reg
-  ShiftOp shift_op_;
-  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
-  AddrMode am_;  // bits P, U, and W
-
-  friend class Assembler;
-};
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
-  // Detect features of the target CPU. Set safe defaults if the serializer
-  // is enabled (snapshots must be portable).
-  static void Probe();
-
-  // Check whether a feature is supported by the target CPU.
-  static bool IsSupported(CpuFeature f) {
-    if (f == VFP3 && !FLAG_enable_vfp3) return false;
-    return (supported_ & (1u << f)) != 0;
-  }
-
-  // Check whether a feature is currently enabled.
-  static bool IsEnabled(CpuFeature f) {
-    return (enabled_ & (1u << f)) != 0;
-  }
-
-  // Enable a specified feature within a scope.
-  class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-   public:
-    explicit Scope(CpuFeature f) {
-      ASSERT(CpuFeatures::IsSupported(f));
-      ASSERT(!Serializer::enabled() ||
-             (found_by_runtime_probing_ & (1u << f)) == 0);
-      old_enabled_ = CpuFeatures::enabled_;
-      CpuFeatures::enabled_ |= 1u << f;
-    }
-    ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
-   private:
-    unsigned old_enabled_;
-#else
-   public:
-    explicit Scope(CpuFeature f) {}
-#endif
-  };
-
- private:
-  static unsigned supported_;
-  static unsigned enabled_;
-  static unsigned found_by_runtime_probing_;
-};
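-
-
-// Typical usage (hedged sketch) of the feature scope declared above:
-//
-//   if (CpuFeatures::IsSupported(VFP3)) {
-//     CpuFeatures::Scope scope(VFP3);
-//     __ vadd(d0, d1, d2);  // VFP3 is enabled for the scope's lifetime
-//   }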
-
-
-typedef int32_t Instr;
-
-
-extern const Instr kMovLrPc;
-extern const Instr kLdrPCPattern;
-
-
-class Assembler : public Malloced {
- public:
-  // Create an assembler. Instructions and relocation information are emitted
-  // into a buffer, with the instructions starting from the beginning and the
-  // relocation information starting from the end of the buffer. See CodeDesc
-  // for a detailed comment on the layout (globals.h).
-  //
-  // If the provided buffer is NULL, the assembler allocates and grows its own
-  // buffer, and buffer_size determines the initial buffer size. The buffer is
-  // owned by the assembler and deallocated upon destruction of the assembler.
-  //
-  // If the provided buffer is not NULL, the assembler uses the provided buffer
-  // for code generation and assumes its size to be buffer_size. If the buffer
-  // is too small, a fatal error occurs. No deallocation of the buffer is done
-  // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
-  ~Assembler();
-
-  // GetCode emits any pending (non-emitted) code and fills the descriptor
-  // desc. GetCode() is idempotent; it returns the same result if no other
-  // Assembler functions are invoked in between GetCode() calls.
-  void GetCode(CodeDesc* desc);
-
-  // Label operations & relative jumps (PPUM Appendix D)
-  //
-  // Takes a branch opcode (cc) and a label (L) and generates
-  // either a backward branch or a forward branch and links it
-  // to the label fixup chain. Usage:
-  //
-  // Label L;    // unbound label
-  // j(cc, &L);  // forward branch to unbound label
-  // bind(&L);   // bind label to the current pc
-  // j(cc, &L);  // backward branch to bound label
-  // bind(&L);   // illegal: a label may be bound only once
-  //
-  // Note: The same Label can be used for forward and backward branches
-  // but it may be bound only once.
-
-  void bind(Label* L);  // binds an unbound label L to the current code position
-
-  // Returns the branch offset to the given label from the current code
-  // position. Links the label to the current position if it is still
-  // unbound. Manages the jump elimination optimization if the second
-  // parameter is true.
-  int branch_offset(Label* L, bool jump_elimination_allowed);
-
-  // Puts a label's target address at the given position.
-  // The high 8 bits are set to zero.
-  void label_at_put(Label* L, int at_offset);
-
-  // Return the address in the constant pool of the code target address used by
-  // the branch/call instruction at pc.
-  INLINE(static Address target_address_address_at(Address pc));
-
-  // Read/Modify the code target address in the branch/call instruction at pc.
-  INLINE(static Address target_address_at(Address pc));
-  INLINE(static void set_target_address_at(Address pc, Address target));
-
-  // This sets the branch destination (which is in the constant pool on ARM).
-  // This is for calls and branches within generated code.
-  inline static void set_target_at(Address constant_pool_entry, Address target);
-
-  // This sets the branch destination (which is in the constant pool on ARM).
-  // This is for calls and branches to runtime code.
-  inline static void set_external_target_at(Address constant_pool_entry,
-                                            Address target) {
-    set_target_at(constant_pool_entry, target);
-  }
-
-  // Here we are patching the address in the constant pool, not the actual call
-  // instruction.  The address in the constant pool is the same size as a
-  // pointer.
-  static const int kCallTargetSize = kPointerSize;
-  static const int kExternalTargetSize = kPointerSize;
-
-  // Size of an instruction.
-  static const int kInstrSize = sizeof(Instr);
-
-  // Distance between the instruction referring to the address of the call
-  // target (ldr pc, [target addr in const pool]) and the return address
-  static const int kCallTargetAddressOffset = kInstrSize;
-
-  // Distance between start of patched return sequence and the emitted address
-  // to jump to.
-  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
-
-  // Difference between address of current opcode and value read from pc
-  // register.
-  static const int kPcLoadDelta = 8;
-
-  static const int kJSReturnSequenceLength = 4;
-
-  // ---------------------------------------------------------------------------
-  // Code generation
-
-  // Insert the smallest number of nop instructions
-  // possible to align the pc offset to a multiple
-  // of m. m must be a power of 2 (>= 4).
-  void Align(int m);
-
-  // Branch instructions
-  void b(int branch_offset, Condition cond = al);
-  void bl(int branch_offset, Condition cond = al);
-  void blx(int branch_offset);  // v5 and above
-  void blx(Register target, Condition cond = al);  // v5 and above
-  void bx(Register target, Condition cond = al);  // v5 and above, plus v4t
-
-  // Convenience branch instructions using labels
-  void b(Label* L, Condition cond = al)  {
-    b(branch_offset(L, cond == al), cond);
-  }
-  void b(Condition cond, Label* L)  { b(branch_offset(L, cond == al), cond); }
-  void bl(Label* L, Condition cond = al)  { bl(branch_offset(L, false), cond); }
-  void bl(Condition cond, Label* L)  { bl(branch_offset(L, false), cond); }
-  void blx(Label* L)  { blx(branch_offset(L, false)); }  // v5 and above
-
-  // Data-processing instructions
-  void ubfx(Register dst, Register src1, const Operand& src2,
-            const Operand& src3, Condition cond = al);
-
-  void and_(Register dst, Register src1, const Operand& src2,
-            SBit s = LeaveCC, Condition cond = al);
-
-  void eor(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void sub(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-  void sub(Register dst, Register src1, Register src2,
-           SBit s = LeaveCC, Condition cond = al) {
-    sub(dst, src1, Operand(src2), s, cond);
-  }
-
-  void rsb(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void add(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void adc(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void sbc(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void rsc(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void tst(Register src1, const Operand& src2, Condition cond = al);
-  void tst(Register src1, Register src2, Condition cond = al) {
-    tst(src1, Operand(src2), cond);
-  }
-
-  void teq(Register src1, const Operand& src2, Condition cond = al);
-
-  void cmp(Register src1, const Operand& src2, Condition cond = al);
-  void cmp(Register src1, Register src2, Condition cond = al) {
-    cmp(src1, Operand(src2), cond);
-  }
-
-  void cmn(Register src1, const Operand& src2, Condition cond = al);
-
-  void orr(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-  void orr(Register dst, Register src1, Register src2,
-           SBit s = LeaveCC, Condition cond = al) {
-    orr(dst, src1, Operand(src2), s, cond);
-  }
-
-  void mov(Register dst, const Operand& src,
-           SBit s = LeaveCC, Condition cond = al);
-  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
-    mov(dst, Operand(src), s, cond);
-  }
-
-  void bic(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void mvn(Register dst, const Operand& src,
-           SBit s = LeaveCC, Condition cond = al);
-
-  // Multiply instructions
-
-  void mla(Register dst, Register src1, Register src2, Register srcA,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void mul(Register dst, Register src1, Register src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void smlal(Register dstL, Register dstH, Register src1, Register src2,
-             SBit s = LeaveCC, Condition cond = al);
-
-  void smull(Register dstL, Register dstH, Register src1, Register src2,
-             SBit s = LeaveCC, Condition cond = al);
-
-  void umlal(Register dstL, Register dstH, Register src1, Register src2,
-             SBit s = LeaveCC, Condition cond = al);
-
-  void umull(Register dstL, Register dstH, Register src1, Register src2,
-             SBit s = LeaveCC, Condition cond = al);
-
-  // Miscellaneous arithmetic instructions
-
-  void clz(Register dst, Register src, Condition cond = al);  // v5 and above
-
-  // Status register access instructions
-
-  void mrs(Register dst, SRegister s, Condition cond = al);
-  void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
-
-  // Load/Store instructions
-  void ldr(Register dst, const MemOperand& src, Condition cond = al);
-  void str(Register src, const MemOperand& dst, Condition cond = al);
-  void ldrb(Register dst, const MemOperand& src, Condition cond = al);
-  void strb(Register src, const MemOperand& dst, Condition cond = al);
-  void ldrh(Register dst, const MemOperand& src, Condition cond = al);
-  void strh(Register src, const MemOperand& dst, Condition cond = al);
-  void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
-  void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
-
-  // Load/Store multiple instructions
-  void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
-  void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
-
-  // Semaphore instructions
-  void swp(Register dst, Register src, Register base, Condition cond = al);
-  void swpb(Register dst, Register src, Register base, Condition cond = al);
-
-  // Exception-generating instructions and debugging support
-  void stop(const char* msg);
-
-  void bkpt(uint32_t imm16);  // v5 and above
-  void swi(uint32_t imm24, Condition cond = al);
-
-  // Coprocessor instructions
-
-  void cdp(Coprocessor coproc, int opcode_1,
-           CRegister crd, CRegister crn, CRegister crm,
-           int opcode_2, Condition cond = al);
-
-  void cdp2(Coprocessor coproc, int opcode_1,
-            CRegister crd, CRegister crn, CRegister crm,
-            int opcode_2);  // v5 and above
-
-  void mcr(Coprocessor coproc, int opcode_1,
-           Register rd, CRegister crn, CRegister crm,
-           int opcode_2 = 0, Condition cond = al);
-
-  void mcr2(Coprocessor coproc, int opcode_1,
-            Register rd, CRegister crn, CRegister crm,
-            int opcode_2 = 0);  // v5 and above
-
-  void mrc(Coprocessor coproc, int opcode_1,
-           Register rd, CRegister crn, CRegister crm,
-           int opcode_2 = 0, Condition cond = al);
-
-  void mrc2(Coprocessor coproc, int opcode_1,
-            Register rd, CRegister crn, CRegister crm,
-            int opcode_2 = 0);  // v5 and above
-
-  void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
-           LFlag l = Short, Condition cond = al);
-  void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
-           LFlag l = Short, Condition cond = al);
-
-  void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
-            LFlag l = Short);  // v5 and above
-  void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
-            LFlag l = Short);  // v5 and above
-
-  void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
-           LFlag l = Short, Condition cond = al);
-  void stc(Coprocessor coproc, CRegister crd, Register base, int option,
-           LFlag l = Short, Condition cond = al);
-
-  void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
-            LFlag l = Short);  // v5 and above
-  void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
-            LFlag l = Short);  // v5 and above
-
-  // Support for VFP.
-  // All these APIs support S0 to S31 and D0 to D15.
-  // Currently these APIs do not support extended D registers, i.e., D16 to D31.
-  // However, some simple modifications can allow
-  // these APIs to support D16 to D31.
-
-  void vldr(const DwVfpRegister dst,
-            const Register base,
-            int offset,  // Offset must be a multiple of 4.
-            const Condition cond = al);
-  void vstr(const DwVfpRegister src,
-            const Register base,
-            int offset,  // Offset must be a multiple of 4.
-            const Condition cond = al);
-  void vmov(const DwVfpRegister dst,
-            const Register src1,
-            const Register src2,
-            const Condition cond = al);
-  void vmov(const Register dst1,
-            const Register dst2,
-            const DwVfpRegister src,
-            const Condition cond = al);
-  void vmov(const SwVfpRegister dst,
-            const Register src,
-            const Condition cond = al);
-  void vmov(const Register dst,
-            const SwVfpRegister src,
-            const Condition cond = al);
-  void vcvt(const DwVfpRegister dst,
-            const SwVfpRegister src,
-            const Condition cond = al);
-  void vcvt(const SwVfpRegister dst,
-            const DwVfpRegister src,
-            const Condition cond = al);
-
-  void vadd(const DwVfpRegister dst,
-            const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const Condition cond = al);
-  void vsub(const DwVfpRegister dst,
-            const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const Condition cond = al);
-  void vmul(const DwVfpRegister dst,
-            const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const Condition cond = al);
-  void vdiv(const DwVfpRegister dst,
-            const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const Condition cond = al);
-  void vcmp(const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const SBit s = LeaveCC,
-            const Condition cond = al);
-  void vmrs(const Register dst,
-            const Condition cond = al);
-
-  // Pseudo instructions
-  void nop()  { mov(r0, Operand(r0)); }
-
-  void push(Register src, Condition cond = al) {
-    str(src, MemOperand(sp, 4, NegPreIndex), cond);
-  }
-
-  void pop(Register dst, Condition cond = al) {
-    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
-  }
-
-  void pop() {
-    add(sp, sp, Operand(kPointerSize));
-  }
-
-  // Load effective address of memory operand x into register dst
-  void lea(Register dst, const MemOperand& x,
-           SBit s = LeaveCC, Condition cond = al);
-
-  // Jump unconditionally to given label.
-  void jmp(Label* L) { b(L, al); }
-
-  // Check the code size generated from label to here.
-  int InstructionsGeneratedSince(Label* l) {
-    return (pc_offset() - l->pos()) / kInstrSize;
-  }
-
-  // Check whether an immediate fits an addressing mode 1 instruction.
-  bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
-
-  // Postpone the generation of the constant pool for the specified number of
-  // instructions.
-  void BlockConstPoolFor(int instructions);
-
-  // Debugging
-
-  // Mark address of the ExitJSFrame code.
-  void RecordJSReturn();
-
-  // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
-  void RecordComment(const char* msg);
-
-  void RecordPosition(int pos);
-  void RecordStatementPosition(int pos);
-  void WriteRecordedPositions();
-
-  int pc_offset() const { return pc_ - buffer_; }
-  int current_position() const { return current_position_; }
-  int current_statement_position() const { return current_statement_position_; }
-
- protected:
-  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
-  // Read/patch instructions
-  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
-  void instr_at_put(byte* pc, Instr instr) {
-    *reinterpret_cast<Instr*>(pc) = instr;
-  }
-  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
-  void instr_at_put(int pos, Instr instr) {
-    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
-  }
-
-  // Decode branch instruction at pos and return branch target pos
-  int target_at(int pos);
-
-  // Patch branch instruction at pos to branch to given branch target pos
-  void target_at_put(int pos, int target_pos);
-
-  // Check if it is time to emit a constant pool for pending reloc info entries
-  void CheckConstPool(bool force_emit, bool require_jump);
-
-  // Block the emission of the constant pool before pc_offset
-  void BlockConstPoolBefore(int pc_offset) {
-    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
-  }
-
- private:
-  // Code buffer:
-  // The buffer into which code and relocation info are generated.
-  byte* buffer_;
-  int buffer_size_;
-  // True if the assembler owns the buffer, false if buffer is external.
-  bool own_buffer_;
-
-  // Buffer size and constant pool distance are checked together at regular
-  // intervals of kBufferCheckInterval emitted bytes
-  static const int kBufferCheckInterval = 1*KB/2;
-  int next_buffer_check_;  // pc offset of next buffer check
-
-  // Code generation
-  // The relocation writer's position is at least kGap bytes below the end of
-  // the generated instructions. This is so that multi-instruction sequences do
-  // not have to check for overflow. The same is true for writes of large
-  // relocation info entries.
-  static const int kGap = 32;
-  byte* pc_;  // the program counter; moves forward
-
-  // Constant pool generation
-  // Pools are emitted in the instruction stream, preferably after unconditional
-  // jumps or after returns from functions (in dead code locations).
-  // If a long code sequence does not contain unconditional jumps, it is
-  // necessary to emit the constant pool before the pool gets too far from the
-  // location it is accessed from. In this case, we emit a jump over the emitted
-  // constant pool.
-  // Constants in the pool may be addresses of functions that get relocated;
-  // if so, a relocation info entry is associated with the constant pool entry.
-
-  // Repeated checking whether the constant pool should be emitted is rather
-  // expensive. By default we only check again once a number of instructions
-  // has been generated. That also means that the sizing of the buffers is not
-  // an exact science, and that we rely on some slop to not overrun buffers.
-  static const int kCheckConstIntervalInst = 32;
-  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
-
-
-  // Pools are emitted after function return and in dead code at (more or less)
-  // regular intervals of kDistBetweenPools bytes
-  static const int kDistBetweenPools = 1*KB;
-
-  // Constants in pools are accessed via pc-relative addressing, which can
-  // reach +/-4KB, thereby defining a maximum distance between the instruction
-  // and the accessed constant. We satisfy this constraint by limiting the
-  // distance between pools.
-  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
-
-  // Emission of the constant pool may be blocked in some code sequences
-  int no_const_pool_before_;  // block emission before this pc offset
-
-  // Keep track of the last emitted pool to guarantee a maximal distance
-  int last_const_pool_end_;  // pc offset following the last constant pool
-
-  // Relocation info generation
-  // Each relocation is encoded as a variable size value
-  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
-  RelocInfoWriter reloc_info_writer;
-  // Relocation info records are also used during code generation as temporary
-  // containers for constants and code target addresses until they are emitted
-  // to the constant pool. These pending relocation info records are temporarily
-  // stored in a separate buffer until a constant pool is emitted.
-  // If every instruction in a long sequence is accessing the pool, we need one
-  // pending relocation entry per instruction.
-  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
-  RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
-  int num_prinfo_;  // number of pending reloc info entries in the buffer
-
-  // The bound position; before this we cannot do instruction elimination.
-  int last_bound_pos_;
-
-  // source position information
-  int current_position_;
-  int current_statement_position_;
-  int written_position_;
-  int written_statement_position_;
-
-  // Code emission
-  inline void CheckBuffer();
-  void GrowBuffer();
-  inline void emit(Instr x);
-
-  // Instruction generation
-  void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
-  void addrmod2(Instr instr, Register rd, const MemOperand& x);
-  void addrmod3(Instr instr, Register rd, const MemOperand& x);
-  void addrmod4(Instr instr, Register rn, RegList rl);
-  void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
-
-  // Labels
-  void print(Label* L);
-  void bind_to(Label* L, int pos);
-  void link_to(Label* L, Label* appendix);
-  void next(Label* L);
-
-  // Record reloc info for current pc_
-  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
-  friend class RegExpMacroAssemblerARM;
-  friend class RelocInfo;
-  friend class CodePatcher;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_ASSEMBLER_THUMB2_H_
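The constant-pool machinery described in the deleted header reduces to a distance check: constants are loaded pc-relative, the addressing mode reaches +/-4KB, so a pool must be emitted before any pending entry drifts out of range. A minimal sketch of that check, assuming the constants declared above (the real assembler folds this into CheckConstPool):

    static const int KB = 1024;
    static const int kBufferCheckInterval = 1 * KB / 2;
    // Reach of pc-relative addressing, minus slack for the coarse
    // interval at which the check actually runs.
    static const int kMaxDistBetweenPools = 4 * KB - 2 * kBufferCheckInterval;

    bool ShouldEmitConstPool(int pc_offset, int last_const_pool_end,
                             int no_const_pool_before) {
      if (pc_offset < no_const_pool_before) return false;  // emission blocked
      return pc_offset - last_const_pool_end >= kMaxDistBetweenPools;
    }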
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 5718cb3..1f77656 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "debug.h"
 #include "runtime.h"
@@ -130,7 +132,7 @@
   // of the JSArray.
   // result: JSObject
   // scratch2: start of next object
-  __ lea(scratch1, MemOperand(result, JSArray::kSize));
+  __ add(scratch1, result, Operand(JSArray::kSize));
   __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
 
   // Clear the heap tag on the elements array.
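The lea-to-add rewrite above is behavior-preserving: a MemOperand with an immediate offset denotes base + offset, so loading its effective address is a single add with that immediate. A sketch of the equivalence (plain C++, not V8 code):

    // Both the removed lea and the added add leave this value in scratch1.
    int32_t EffectiveAddress(int32_t base, int32_t immediate_offset) {
      return base + immediate_offset;
    }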
@@ -1311,3 +1313,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 68ae026..64ed425 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "compiler.h"
@@ -1368,6 +1370,7 @@
   // give us a megamorphic load site. Not super, but it works.
   LoadAndSpill(applicand);
   Handle<String> name = Factory::LookupAsciiSymbol("apply");
+  frame_->Dup();
   frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
   frame_->EmitPush(r0);
 
@@ -1511,7 +1514,7 @@
   // Then process it as a normal function call.
   __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
   __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ strd(r0, MemOperand(sp, 2 * kPointerSize));
+  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
 
   CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
   frame_->CallStub(&call_function, 3);
@@ -2304,12 +2307,10 @@
   node->continue_target()->SetExpectedHeight();
 
   // Load the current count to r0, load the length to r1.
-  __ ldrd(r0, frame_->ElementAt(0));
+  __ Ldrd(r0, r1, frame_->ElementAt(0));
   __ cmp(r0, r1);  // compare to the array length
   node->break_target()->Branch(hs);
 
-  __ ldr(r0, frame_->ElementAt(0));
-
   // Get the i'th entry of the array.
   __ ldr(r2, frame_->ElementAt(2));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -2727,7 +2728,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function info and instantiate it.
@@ -2748,7 +2748,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
   InstantiateFunction(node->shared_function_info());
   ASSERT_EQ(original_height + 1, frame_->height());
@@ -3007,8 +3006,6 @@
                      typeof_state == INSIDE_TYPEOF
                          ? RelocInfo::CODE_TARGET
                          : RelocInfo::CODE_TARGET_CONTEXT);
-  // Drop the global object. The result is in r0.
-  frame_->Drop();
 }
 
 
@@ -3422,7 +3419,6 @@
       frame_->Dup();
     }
     EmitNamedLoad(name, var != NULL);
-    frame_->Drop();  // Receiver is left on the stack.
     frame_->EmitPush(r0);
 
     // Perform the binary operation.
@@ -3561,9 +3557,7 @@
   // Perform the assignment.  It is safe to ignore constants here.
   ASSERT(node->op() != Token::INIT_CONST);
   CodeForSourcePosition(node->position());
-  frame_->PopToR0();
   EmitKeyedStore(prop->key()->type());
-  frame_->Drop(2);  // Key and receiver are left on the stack.
   frame_->EmitPush(r0);
 
   // Stack layout:
@@ -4047,37 +4041,35 @@
 
 
 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
-  LoadAndSpill(args->at(0));
-  frame_->EmitPop(r0);
-  __ tst(r0, Operand(kSmiTagMask));
+  Load(args->at(0));
+  Register reg = frame_->PopToRegister();
+  __ tst(reg, Operand(kSmiTagMask));
   cc_reg_ = eq;
 }
 
 
 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
   ASSERT_EQ(args->length(), 3);
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (ShouldGenerateLog(args->at(0))) {
-    LoadAndSpill(args->at(1));
-    LoadAndSpill(args->at(2));
+    Load(args->at(1));
+    Load(args->at(2));
+    frame_->SpillAll();
+    VirtualFrame::SpilledScope spilled_scope(frame_);
     __ CallRuntime(Runtime::kLog, 2);
   }
 #endif
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  frame_->EmitPush(r0);
+  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
 }
 
 
 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
-  LoadAndSpill(args->at(0));
-  frame_->EmitPop(r0);
-  __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
+  Load(args->at(0));
+  Register reg = frame_->PopToRegister();
+  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
   cc_reg_ = eq;
 }
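GenerateIsNonNegativeSmi folds two checks into one tst: kSmiTagMask catches non-smis and 0x80000000u catches negative values, since a smi carries the value's sign in its top bit. A sketch of the predicate being computed, assuming the usual one-bit smi tag of zero:

    // eq after the tst means both bits are clear.
    bool IsNonNegativeSmi(uint32_t word) {
      const uint32_t kSmiTagMask = 1;          // low bit clear => smi
      const uint32_t kSignMask = 0x80000000u;  // high bit clear => non-negative
      return (word & (kSmiTagMask | kSignMask)) == 0;
    }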
 
@@ -4108,22 +4100,23 @@
 // flatten the string, which will ensure that the answer is in the left hand
 // side the next time around.
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 2);
   Comment(masm_, "[ GenerateFastCharCodeAt");
 
-  LoadAndSpill(args->at(0));
-  LoadAndSpill(args->at(1));
-  frame_->EmitPop(r1);  // Index.
-  frame_->EmitPop(r2);  // String.
+  Load(args->at(0));
+  Load(args->at(1));
+  Register index = frame_->PopToRegister();         // Index.
+  Register string = frame_->PopToRegister(index);   // String.
+  Register result = VirtualFrame::scratch0();
+  Register scratch = VirtualFrame::scratch1();
 
   Label slow_case;
   Label exit;
   StringHelper::GenerateFastCharCodeAt(masm_,
-                                       r2,
-                                       r1,
-                                       r3,
-                                       r0,
+                                       string,
+                                       index,
+                                       scratch,
+                                       result,
                                        &slow_case,
                                        &slow_case,
                                        &slow_case,
@@ -4133,10 +4126,10 @@
   __ bind(&slow_case);
   // Move the undefined value into the result register, which will
   // trigger the slow case.
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
 
   __ bind(&exit);
-  frame_->EmitPush(r0);
+  frame_->EmitPush(result);
 }
 
 
@@ -4216,9 +4209,8 @@
   __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
   // Undetectable objects behave like undefined when tested with typeof.
   __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
-  __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
-  __ cmp(r1, Operand(1 << Map::kIsUndetectable));
-  false_target()->Branch(eq);
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  false_target()->Branch(ne);
 
   __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
   __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
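The tst rewrite above works because the undetectable flag is a single bit: after and_-ing with a one-bit mask, comparing the result against the mask and testing it for nonzero are the same predicate, so one instruction replaces two and the branch condition flips from eq to ne. A sketch of the test (the bit position is illustrative, not the actual value of Map::kIsUndetectable):

    bool IsUndetectable(uint8_t bit_field) {
      const uint8_t kMask = 1 << 4;  // illustrative bit position
      return (bit_field & kMask) != 0;
    }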
@@ -4258,48 +4250,52 @@
 
 
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
 
+  Register scratch0 = VirtualFrame::scratch0();
+  Register scratch1 = VirtualFrame::scratch1();
   // Get the frame pointer for the calling frame.
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
 
   // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &check_frame_marker);
-  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch1,
+         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ ldr(scratch0,
+         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
 
   // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
-  __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  __ ldr(scratch1,
+         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
   cc_reg_ = eq;
 }
 
 
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
 
-  Label exit;
-
-  // Get the number of formal parameters.
-  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+  Register tos = frame_->GetTOSRegister();
+  Register scratch0 = VirtualFrame::scratch0();
+  Register scratch1 = VirtualFrame::scratch1();
 
   // Check if the calling frame is an arguments adaptor frame.
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &exit);
+  __ ldr(scratch0,
+         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch1,
+         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Get the number of formal parameters.
+  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
 
   // Arguments adaptor case: Read the arguments length from the
   // adaptor frame.
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ ldr(tos,
+         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
+         eq);
 
-  __ bind(&exit);
-  frame_->EmitPush(r0);
+  frame_->EmitPush(tos);
 }
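GenerateArgumentsLength now uses ARM predication instead of branches: the cmp sets the flags once, the ne-qualified mov covers the ordinary frame and the eq-qualified ldr covers the adaptor frame, so exactly one of them writes tos. A sketch (not V8 code) of the selection being encoded:

    int ArgumentsLength(bool is_adaptor_frame, int num_parameters,
                        int adaptor_frame_length) {
      int tos;
      if (!is_adaptor_frame) tos = num_parameters;  // mov ..., ne
      else tos = adaptor_frame_length;              // ldr ..., eq
      return tos;
    }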
 
 
@@ -4737,15 +4733,14 @@
 
 
 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
-  LoadAndSpill(args->at(0));
-  LoadAndSpill(args->at(1));
-  frame_->EmitPop(r0);
-  frame_->EmitPop(r1);
-  __ cmp(r0, r1);
+  Load(args->at(0));
+  Load(args->at(1));
+  Register lhs = frame_->PopToRegister();
+  Register rhs = frame_->PopToRegister(lhs);
+  __ cmp(lhs, rhs);
   cc_reg_ = eq;
 }
 
@@ -5044,6 +5039,7 @@
   // after evaluating the left hand side (due to the shortcut
   // semantics), but the compiler must (statically) know if the result
   // of compiling the binary operation is materialized or not.
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   if (node->op() == Token::AND) {
     JumpTarget is_true;
     LoadConditionAndSpill(node->left(),
@@ -5055,8 +5051,7 @@
       JumpTarget pop_and_continue;
       JumpTarget exit;
 
-      __ ldr(r0, frame_->Top());  // Duplicate the stack top.
-      frame_->EmitPush(r0);
+      frame_->Dup();
       // Avoid popping the result if it converts to 'false' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
@@ -5065,7 +5060,7 @@
 
       // Pop the result of evaluating the first part.
       pop_and_continue.Bind();
-      frame_->EmitPop(r0);
+      frame_->Pop();
 
       // Evaluate right side expression.
       is_true.Bind();
@@ -5102,8 +5097,7 @@
       JumpTarget pop_and_continue;
       JumpTarget exit;
 
-      __ ldr(r0, frame_->Top());
-      frame_->EmitPush(r0);
+      frame_->Dup();
       // Avoid popping the result if it converts to 'true' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
@@ -5112,7 +5106,7 @@
 
       // Pop the result of evaluating the first part.
       pop_and_continue.Bind();
-      frame_->EmitPop(r0);
+      frame_->Pop();
 
       // Evaluate right side expression.
       is_false.Bind();
@@ -5147,7 +5141,6 @@
   Comment cmnt(masm_, "[ BinaryOperation");
 
   if (node->op() == Token::AND || node->op() == Token::OR) {
-    VirtualFrame::SpilledScope spilled_scope(frame_);
     GenerateLogicalBooleanOperation(node);
   } else {
     // Optimize for the case where (at least) one of the expressions
@@ -5200,9 +5193,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  __ ldr(r0, frame_->Function());
-  frame_->EmitPush(r0);
+  frame_->EmitPush(MemOperand(frame_->Function()));
   ASSERT_EQ(original_height + 1, frame_->height());
 }
 
@@ -5430,26 +5421,30 @@
 
 class DeferredReferenceGetNamedValue: public DeferredCode {
  public:
-  explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
+  explicit DeferredReferenceGetNamedValue(Register receiver,
+                                          Handle<String> name)
+      : receiver_(receiver), name_(name) {
     set_comment("[ DeferredReferenceGetNamedValue");
   }
 
   virtual void Generate();
 
  private:
+  Register receiver_;
   Handle<String> name_;
 };
 
 
 void DeferredReferenceGetNamedValue::Generate() {
+  ASSERT(receiver_.is(r0) || receiver_.is(r1));
+
   Register scratch1 = VirtualFrame::scratch0();
   Register scratch2 = VirtualFrame::scratch1();
   __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
   __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
 
-  // Setup the registers and call load IC.
-  // On entry to this deferred code, r0 is assumed to already contain the
-  // receiver from the top of the stack.
+  // Ensure receiver in r0 and name in r2 to match load IC calling convention.
+  __ Move(r0, receiver_);
   __ mov(r2, Operand(name_));
 
   // The rest of the instructions in the deferred code must be together.
@@ -5517,11 +5512,19 @@
 
 class DeferredReferenceSetKeyedValue: public DeferredCode {
  public:
-  DeferredReferenceSetKeyedValue() {
+  DeferredReferenceSetKeyedValue(Register value,
+                                 Register key,
+                                 Register receiver)
+      : value_(value), key_(key), receiver_(receiver) {
     set_comment("[ DeferredReferenceSetKeyedValue");
   }
 
   virtual void Generate();
+
+ private:
+  Register value_;
+  Register key_;
+  Register receiver_;
 };
 
 
@@ -5532,10 +5535,17 @@
   __ IncrementCounter(
       &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
 
+  // Ensure value in r0, key in r1 and receiver in r2 to match the keyed
+  // store IC calling convention.
+  if (value_.is(r1)) {
+    __ Swap(r0, r1, ip);
+  }
+  ASSERT(receiver_.is(r2));
+
   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Call keyed load IC. It has receiver amd key on the stack and the value to
-    // store in r0.
+    // Call keyed store IC. It has the arguments value, key and receiver in r0,
+    // r1 and r2.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
@@ -5573,10 +5583,11 @@
     // this code
 
     // Load the receiver from the stack.
-    frame_->SpillAllButCopyTOSToR0();
+    Register receiver = frame_->PopToRegister();
+    VirtualFrame::SpilledScope spilled(frame_);
 
     DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(name);
+        new DeferredReferenceGetNamedValue(receiver, name);
 
 #ifdef DEBUG
     int kInlinedNamedLoadInstructions = 7;
@@ -5586,19 +5597,19 @@
 
     { Assembler::BlockConstPoolScope block_const_pool(masm_);
       // Check that the receiver is a heap object.
-      __ tst(r0, Operand(kSmiTagMask));
+      __ tst(receiver, Operand(kSmiTagMask));
       deferred->Branch(eq);
 
       // Check the map. The null map used below is patched by the inline cache
       // code.
-      __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+      __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
       __ mov(r3, Operand(Factory::null_value()));
       __ cmp(r2, r3);
       deferred->Branch(ne);
 
       // Initially use an invalid index. The index will be patched by the
       // inline cache code.
-      __ ldr(r0, MemOperand(r0, 0));
+      __ ldr(r0, MemOperand(receiver, 0));
 
       // Make sure that the expected number of instructions is generated.
       ASSERT_EQ(kInlinedNamedLoadInstructions,
@@ -5695,7 +5706,7 @@
 
       __ mov(r0, scratch1);
       // Make sure that the expected number of instructions is generated.
-      ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatchSize,
+      ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }
 
@@ -5705,78 +5716,86 @@
 
 
 void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-  VirtualFrame::SpilledScope scope(frame_);
   // Generate inlined version of the keyed store if the code is in a loop
   // and the key is likely to be a smi.
   if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
     // Inline the keyed store.
     Comment cmnt(masm_, "[ Inlined store to keyed property");
 
-    DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue();
+    Register scratch1 = VirtualFrame::scratch0();
+    Register scratch2 = VirtualFrame::scratch1();
+    Register scratch3 = r3;
 
     // Counter will be decremented in the deferred code. Placed here to avoid
     // having it in the instruction stream below where patching will occur.
     __ IncrementCounter(&Counters::keyed_store_inline, 1,
-                        frame_->scratch0(), frame_->scratch1());
+                        scratch1, scratch2);
+
+    // Load the value, key and receiver from the stack.
+    Register value = frame_->PopToRegister();
+    Register key = frame_->PopToRegister(value);
+    Register receiver = r2;
+    frame_->EmitPop(receiver);
+    VirtualFrame::SpilledScope spilled(frame_);
+
+    // The deferred code expects value, key and receiver in registers.
+    DeferredReferenceSetKeyedValue* deferred =
+        new DeferredReferenceSetKeyedValue(value, key, receiver);
 
     // Check that the value is a smi. As this inlined code does not set the
     // write barrier it is only possible to store smi values.
-    __ tst(r0, Operand(kSmiTagMask));
+    __ tst(value, Operand(kSmiTagMask));
     deferred->Branch(ne);
 
-    // Load the key and receiver from the stack.
-    __ ldr(r1, MemOperand(sp, 0));
-    __ ldr(r2, MemOperand(sp, kPointerSize));
-
     // Check that the key is a smi.
-    __ tst(r1, Operand(kSmiTagMask));
+    __ tst(key, Operand(kSmiTagMask));
     deferred->Branch(ne);
 
     // Check that the receiver is a heap object.
-    __ tst(r2, Operand(kSmiTagMask));
+    __ tst(receiver, Operand(kSmiTagMask));
     deferred->Branch(eq);
 
     // Check that the receiver is a JSArray.
-    __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
+    __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
     deferred->Branch(ne);
 
     // Check that the key is within bounds. Both the key and the length of
     // the JSArray are smis. Use unsigned comparison to handle negative keys.
-    __ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset));
-    __ cmp(r3, r1);
+    __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
+    __ cmp(scratch1, key);
     deferred->Branch(ls);  // Unsigned less equal.
 
     // The following instructions are part of the inlined store keyed
     // property code which can be patched. Therefore the exact number of
     // instructions generated needs to be fixed, so the constant pool is blocked
     // while generating this code.
-#ifdef DEBUG
-    int kInlinedKeyedStoreInstructions = 7;
-    Label check_inlined_codesize;
-    masm_->bind(&check_inlined_codesize);
-#endif
     { Assembler::BlockConstPoolScope block_const_pool(masm_);
       // Get the elements array from the receiver and check that it
       // is not a dictionary.
-      __ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
-      __ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset));
+      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
       // Read the fixed array map from the constant pool (not from the root
       // array) so that the value can be patched.  When debugging, we patch this
       // comparison to always fail so that we will hit the IC call in the
       // deferred code which will allow the debugger to break for fast case
       // stores.
-      __ mov(r5, Operand(Factory::fixed_array_map()));
-      __ cmp(r4, r5);
+#ifdef DEBUG
+      Label check_inlined_codesize;
+      masm_->bind(&check_inlined_codesize);
+#endif
+      __ mov(scratch3, Operand(Factory::fixed_array_map()));
+      __ cmp(scratch2, scratch3);
       deferred->Branch(ne);
 
       // Store the value.
-      __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-      __ str(r0, MemOperand(r3, r1, LSL,
-                            kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+      __ add(scratch1, scratch1,
+             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ str(value,
+             MemOperand(scratch1, key, LSL,
+                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
 
       // Make sure that the expected number of instructions is generated.
-      ASSERT_EQ(kInlinedKeyedStoreInstructions,
+      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }
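The DEBUG label and the ASSERT_EQ above exist because the inline cache patches this sequence in place: the map compare and the store must sit at fixed offsets, which is also why the constant pool is blocked for the whole region. The guard pattern used around every patchable sequence in this file, with the emission elided:

    { Assembler::BlockConstPoolScope block_const_pool(masm_);
    #ifdef DEBUG
      Label start;
      masm_->bind(&start);
    #endif
      // ... emit exactly the patchable instructions here ...
      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
                masm_->InstructionsGeneratedSince(&start));
    }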
 
@@ -5839,19 +5858,20 @@
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
+      if (persist_after_get_) {
+        cgen_->frame()->Dup();
+      }
       cgen_->EmitNamedLoad(GetName(), is_global);
       cgen_->frame()->EmitPush(r0);
-      if (!persist_after_get_) {
-        cgen_->UnloadReference(this);
-      }
+      if (!persist_after_get_) set_unloaded();
       break;
     }
 
     case KEYED: {
+      ASSERT(property != NULL);
       if (persist_after_get_) {
         cgen_->frame()->Dup2();
       }
-      ASSERT(property != NULL);
       cgen_->EmitKeyedLoad();
       cgen_->frame()->EmitPush(r0);
       if (!persist_after_get_) set_unloaded();
@@ -5892,16 +5912,13 @@
     }
 
     case KEYED: {
-      VirtualFrame::SpilledScope scope(frame);
       Comment cmnt(masm, "[ Store to keyed Property");
       Property* property = expression_->AsProperty();
       ASSERT(property != NULL);
       cgen_->CodeForSourcePosition(property->position());
-
-      frame->EmitPop(r0);  // Value.
       cgen_->EmitKeyedStore(property->key()->type());
       frame->EmitPush(r0);
-      cgen_->UnloadReference(this);
+      set_unloaded();
       break;
     }
 
@@ -6362,7 +6379,7 @@
     ConvertToDoubleStub stub1(r3, r2, r7, r6);
     __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
     // Load rhs to a double in r0, r1.
-    __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ pop(lr);
   }
 
@@ -6397,7 +6414,7 @@
   } else {
     __ push(lr);
     // Load lhs to a double in r2, r3.
-    __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
     // Convert rhs to a double in r0, r1.
     __ mov(r7, Operand(r0));
     ConvertToDoubleStub stub2(r1, r0, r7, r6);
@@ -6561,8 +6578,8 @@
     __ sub(r7, r1, Operand(kHeapObjectTag));
     __ vldr(d7, r7, HeapNumber::kValueOffset);
   } else {
-    __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
-    __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
   }
   __ jmp(both_loaded_as_doubles);
 }
@@ -6939,7 +6956,7 @@
       __ vldr(d7, r7, HeapNumber::kValueOffset);
     } else {
       // Calling convention says that second double is in r2 and r3.
-      __ ldrd(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+      __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
     }
     __ jmp(&finished_loading_r0);
     __ bind(&r0_is_smi);
@@ -6991,7 +7008,7 @@
       __ vldr(d6, r7, HeapNumber::kValueOffset);
     } else {
       // Calling convention says that first double is in r0 and r1.
-      __ ldrd(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+      __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
     }
     __ jmp(&finished_loading_r1);
     __ bind(&r1_is_smi);
@@ -7062,7 +7079,7 @@
       __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
   #else
       // Double returned in registers 0 and 1.
-      __ strd(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+      __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
   #endif
       __ mov(r0, Operand(r5));
       // And we are done.
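Throughout this file, raw ldrd/strd instructions give way to Ldrd/Strd macro-assembler calls that name both transfer registers explicitly. Presumably this lets the macro fall back to two single-word transfers on cores without the doubleword instructions; a hypothetical sketch of the contract (the real implementation is in the macro-assembler, not shown in this diff):

    // Loads the 64-bit field at src into the register pair dst1:dst2.
    // With doubleword support this is one ldrd; otherwise two ldr
    // instructions covering the low and high words.
    void MacroAssembler::Ldrd(Register dst1, Register dst2,
                              const MemOperand& src, Condition cond);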
@@ -10020,3 +10037,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 33a85c4..361ea13 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -220,7 +220,8 @@
   static int InlineRuntimeCallArgumentsCount(Handle<String> name);
 
   // Constants related to patching of inlined load/store.
-  static const int kInlinedKeyedLoadInstructionsAfterPatchSize = 19;
+  static const int kInlinedKeyedLoadInstructionsAfterPatch = 19;
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
 
  private:
   // Construction/Destruction
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 2e37120..4e186d1 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "constants-arm.h"
 
 
@@ -128,3 +130,5 @@
 
 
 } }  // namespace assembler::arm
+
+#endif  // V8_TARGET_ARCH_ARM
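constants-arm.cc shows this patch's most mechanical change in its purest form: every ARM translation unit is wrapped so it compiles to nothing on other targets, which lets the build pass all platform files to the compiler unconditionally. The shape each file now follows:

    #include "v8.h"

    #if defined(V8_TARGET_ARCH_ARM)

    // ... ARM-specific implementation ...

    #endif  // V8_TARGET_ARCH_ARM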
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index d50c203..3d3e6ae 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -32,6 +32,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "cpu.h"
 #include "macro-assembler.h"
 
@@ -136,3 +138,5 @@
 }
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index d02ba76..69fc504 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "debug.h"
 
@@ -170,10 +172,11 @@
 
 void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
   // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[4]  : receiver
-  Generate_DebugBreakCallHelper(masm, 0);
+  Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
 }
 
 
@@ -237,3 +240,5 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
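The debug-break change mirrors the keyed-store IC change in codegen-arm.cc: value, key and receiver now arrive in registers rather than on the stack, so the helper must be told which registers carry live state across the break. RegList is a bitmask with one bit per register:

    // The registers holding live IC state: value, key, receiver.
    RegList object_regs = r0.bit() | r1.bit() | r2.bit();
    Generate_DebugBreakCallHelper(masm, object_regs);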
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 4051096..0ac7d19 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -56,6 +56,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "constants-arm.h"
 #include "disasm.h"
 #include "macro-assembler.h"
@@ -1356,3 +1358,5 @@
 
 
 }  // namespace disasm
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 5dedc29..48eaf46 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "fast-codegen.h"
 #include "scopes.h"
@@ -236,3 +238,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 0cb7f12..271e4a6 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -27,12 +27,10 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "frames-inl.h"
-#ifdef V8_ARM_VARIANT_THUMB
-#include "arm/assembler-thumb2-inl.h"
-#else
 #include "arm/assembler-arm-inl.h"
-#endif
 
 
 namespace v8 {
@@ -121,3 +119,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 6680af9..fecc213 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
@@ -62,7 +64,7 @@
   if (mode == PRIMARY) {
     int locals_count = scope()->num_stack_slots();
 
-    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+    __ Push(lr, fp, cp, r1);
     if (locals_count > 0) {
       // Load undefined value here, so the value is ready for the loop
       // below.
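The stm-to-Push conversions in this file are taken here to be layout-preserving: stm db_w stores the listed registers with the lowest-numbered at the lowest address, and the Push macro is assumed to take its arguments in push order (first argument deepest), so both forms build the same frame. The resulting layout:

    // Stack after either instruction sequence (addresses grow upward):
    //   sp + 12 : lr
    //   sp +  8 : fp
    //   sp +  4 : cp
    //   sp +  0 : r1   <- new sp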
@@ -80,11 +82,17 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (scope()->num_heap_slots() > 0) {
+    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    if (heap_slots > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is in r1.
       __ push(r1);
-      __ CallRuntime(Runtime::kNewContext, 1);
+      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+        FastNewContextStub stub(heap_slots);
+        __ CallStub(&stub);
+      } else {
+        __ CallRuntime(Runtime::kNewContext, 1);
+      }
       function_in_register = false;
       // Context is returned in both r0 and cp.  It replaces the context
       // passed to us.  It's saved in the stack and kept live in cp.
@@ -142,6 +150,21 @@
     }
   }
 
+  { Comment cmnt(masm_, "[ Declarations");
+    // For named function expressions, declare the function name as a
+    // constant.
+    if (scope()->is_function_scope() && scope()->function() != NULL) {
+      EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+    }
+    // Visit all the explicit declarations unless there is an illegal
+    // redeclaration.
+    if (scope()->HasIllegalRedeclaration()) {
+      scope()->VisitIllegalRedeclaration(this);
+    } else {
+      VisitDeclarations(scope()->declarations());
+    }
+  }
+
   // Check the stack for overflow or break request.
   // Put the lr setup instruction in the delay slot.  The kInstrSize is
   // added to the implicit 8 byte offset that always applies to operations
@@ -158,10 +181,6 @@
            lo);
   }
 
-  { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(scope()->declarations());
-  }
-
   if (FLAG_trace) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
@@ -382,6 +401,38 @@
   }
 }
 
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+                                    Label* materialize_false,
+                                    Label** if_true,
+                                    Label** if_false) {
+  switch (context_) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      // In an effect context, the true and the false case branch to the
+      // same label.
+      *if_true = *if_false = materialize_true;
+      break;
+    case Expression::kValue:
+      *if_true = materialize_true;
+      *if_false = materialize_false;
+      break;
+    case Expression::kTest:
+      *if_true = true_label_;
+      *if_false = false_label_;
+      break;
+    case Expression::kValueTest:
+      *if_true = materialize_true;
+      *if_false = false_label_;
+      break;
+    case Expression::kTestValue:
+      *if_true = true_label_;
+      *if_false = materialize_false;
+      break;
+  }
+}
+
 
 void FullCodeGenerator::Apply(Expression::Context context,
                               Label* materialize_true,
@@ -396,19 +447,25 @@
 
     case Expression::kValue: {
       Label done;
-      __ bind(materialize_true);
-      __ mov(result_register(), Operand(Factory::true_value()));
-      __ jmp(&done);
-      __ bind(materialize_false);
-      __ mov(result_register(), Operand(Factory::false_value()));
-      __ bind(&done);
       switch (location_) {
         case kAccumulator:
+          __ bind(materialize_true);
+          __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+          __ jmp(&done);
+          __ bind(materialize_false);
+          __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
           break;
         case kStack:
-          __ push(result_register());
+          __ bind(materialize_true);
+          __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+          __ push(ip);
+          __ jmp(&done);
+          __ bind(materialize_false);
+          __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+          __ push(ip);
           break;
       }
+      __ bind(&done);
       break;
     }
 
@@ -417,12 +474,13 @@
 
     case Expression::kValueTest:
       __ bind(materialize_true);
-      __ mov(result_register(), Operand(Factory::true_value()));
       switch (location_) {
         case kAccumulator:
+          __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
           break;
         case kStack:
-          __ push(result_register());
+          __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+          __ push(ip);
           break;
       }
       __ jmp(true_label_);
@@ -430,12 +488,13 @@
 
     case Expression::kTestValue:
       __ bind(materialize_false);
-      __ mov(result_register(), Operand(Factory::false_value()));
       switch (location_) {
         case kAccumulator:
+          __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
           break;
         case kStack:
-          __ push(result_register());
+          __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+          __ push(ip);
           break;
       }
       __ jmp(false_label_);
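The mov(..., Operand(Factory::true_value())) to LoadRoot substitution recurs through the next hunk: instead of materializing a handle through the constant pool (which needs a relocation entry), the value is loaded off the roots array that a reserved register already points at. A hypothetical expansion of the macro (the real one lives in the ARM macro-assembler, not in this diff):

    void MacroAssembler::LoadRoot(Register destination,
                                  Heap::RootListIndex index,
                                  Condition cond) {
      // roots is the reserved register holding the roots array pointer.
      ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
    }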
@@ -444,6 +503,68 @@
 }
 
 
+// Convert constant control flow (true or false) to the result expected for
+// a given expression context.
+void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
+  switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      break;
+    case Expression::kValue: {
+      Heap::RootListIndex value_root_index =
+          flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+      switch (location_) {
+        case kAccumulator:
+          __ LoadRoot(result_register(), value_root_index);
+          break;
+        case kStack:
+          __ LoadRoot(ip, value_root_index);
+          __ push(ip);
+          break;
+      }
+      break;
+    }
+    case Expression::kTest:
+      __ b(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kTestValue:
+      switch (location_) {
+        case kAccumulator:
+          // If value is false it's needed.
+          if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+          break;
+        case kStack:
+          // If value is false it's needed.
+          if (!flag) {
+            __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+            __ push(ip);
+          }
+          break;
+      }
+      __ b(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kValueTest:
+      switch (location_) {
+        case kAccumulator:
+          // If value is true it's needed.
+          if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+          break;
+        case kStack:
+          // If value is true it's needed.
+          if (flag) {
+            __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+            __ push(ip);
+          }
+          break;
+      }
+      __ b(flag ? true_label_ : false_label_);
+      break;
+  }
+}
+
+
 void FullCodeGenerator::DoTest(Expression::Context context) {
   // The value to test is pushed on the stack, and duplicated on the stack
   // if necessary (for value/test and test/value contexts).
@@ -549,22 +670,23 @@
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function) {
   Comment cmnt(masm_, "[ Declaration");
-  Variable* var = decl->proxy()->var();
-  ASSERT(var != NULL);  // Must have been resolved.
-  Slot* slot = var->slot();
-  Property* prop = var->AsProperty();
+  ASSERT(variable != NULL);  // Must have been resolved.
+  Slot* slot = variable->slot();
+  Property* prop = variable->AsProperty();
 
   if (slot != NULL) {
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
           __ str(ip, MemOperand(fp, SlotOffset(slot)));
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
         }
         break;
@@ -574,7 +696,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ ldr(r1,
@@ -582,12 +704,12 @@
           __ cmp(r1, cp);
           __ Check(eq, "Unexpected declaration in current context.");
         }
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
           __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
           // No write barrier since the_hole_value is in old space.
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ str(result_register(),
                  CodeGenerator::ContextOperand(cp, slot->index()));
           int offset = Context::SlotOffset(slot->index());
@@ -599,27 +721,27 @@
         break;
 
       case Slot::LOOKUP: {
-        __ mov(r2, Operand(var->name()));
+        __ mov(r2, Operand(variable->name()));
         // Declaration nodes are always introduced in one of two modes.
-        ASSERT(decl->mode() == Variable::VAR ||
-               decl->mode() == Variable::CONST);
+        ASSERT(mode == Variable::VAR ||
+               mode == Variable::CONST);
         PropertyAttributes attr =
-            (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+            (mode == Variable::VAR) ? NONE : READ_ONLY;
         __ mov(r1, Operand(Smi::FromInt(attr)));
         // Push initial value, if any.
         // Note: For variables we must not push an initial value (such as
         // 'undefined') because we may have a (legal) redeclaration and we
         // must not destroy the current value.
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
-        } else if (decl->fun() != NULL) {
-          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+          __ Push(cp, r2, r1, r0);
+        } else if (function != NULL) {
+          __ Push(cp, r2, r1);
           // Push initial value for function declaration.
-          VisitForValue(decl->fun(), kStack);
+          VisitForValue(function, kStack);
         } else {
           __ mov(r0, Operand(Smi::FromInt(0)));  // No initial value!
-          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+          __ Push(cp, r2, r1, r0);
         }
         __ CallRuntime(Runtime::kDeclareContextSlot, 4);
         break;
@@ -627,53 +749,275 @@
     }
 
   } else if (prop != NULL) {
-    if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+    if (function != NULL || mode == Variable::CONST) {
       // We are declaring a function or constant that rewrites to a
       // property.  Use (keyed) IC to set the initial value.
       VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
-
-      if (decl->fun() != NULL) {
-        VisitForValue(decl->fun(), kAccumulator);
+      if (function != NULL) {
+        VisitForValue(prop->key(), kStack);
+        VisitForValue(function, kAccumulator);
+        __ pop(r1);  // Key.
       } else {
+        VisitForValue(prop->key(), kAccumulator);
+        __ mov(r1, result_register());  // Key.
         __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
       }
+      __ pop(r2);  // Receiver.
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       __ Call(ic, RelocInfo::CODE_TARGET);
-
-      // Value in r0 is ignored (declarations are statements).  Receiver
-      // and key on stack are discarded.
-      __ Drop(2);
+      // Value in r0 is ignored (declarations are statements).
     }
   }
 }
 
 
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
   __ mov(r1, Operand(pairs));
   __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
-  __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
+  __ Push(cp, r1, r0);
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
 
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
-  Comment cmnt(masm_, "[ FunctionLiteral");
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+  // Keep the switch value on the stack until a case matches.
+  VisitForValue(stmt->tag(), kStack);
 
-  // Build the shared function info and instantiate the function based
-  // on it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(expr, script(), this);
-  if (HasStackOverflow()) return;
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
 
-  // Create a new closure.
-  __ mov(r0, Operand(function_info));
-  __ stm(db_w, sp, cp.bit() | r0.bit());
-  __ CallRuntime(Runtime::kNewClosure, 2);
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    // The default is not a test, but remember it as the final fall-through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForValue(clause->label(), kAccumulator);
+
+    // Perform the comparison as if via '==='.  The comparison stub expects
+    // the smi vs. smi case to be handled before it is called.
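+    // Or-ing the operands sets the low tag bit iff at least one of them is
+    // a heap object (smis have a zero tag), which routes us to the stub.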
+    Label slow_case;
+    __ ldr(r1, MemOperand(sp, 0));  // Switch value.
+    __ mov(r2, r1);
+    __ orr(r2, r2, r0);
+    __ tst(r2, Operand(kSmiTagMask));
+    __ b(ne, &slow_case);
+    __ cmp(r1, r0);
+    __ b(ne, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ b(clause->body_target()->entry_label());
+
+    __ bind(&slow_case);
+    CompareStub stub(eq, true);
+    __ CallStub(&stub);
+    __ tst(r0, r0);
+    __ b(ne, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ b(clause->body_target()->entry_label());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ b(nested_statement.break_target());
+  } else {
+    __ b(default_clause->body_target()->entry_label());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target()->entry_label());
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_target());
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  Comment cmnt(masm_, "[ ForInStatement");
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. Both SpiderMonkey and JSC
+  // ignore null and undefined in contrast to the specification; see
+  // ECMA-262 section 12.6.4.
+  VisitForValue(stmt->enumerable(), kAccumulator);
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, &exit);
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, &exit);
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ BranchOnSmi(r0, &convert);
+  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+  __ b(hs, &done_convert);
+  __ bind(&convert);
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+  __ bind(&done_convert);
+  __ push(r0);
+
+  // TODO(kasperl): Check cache validity in generated code. This is a
+  // fast case for the JSObject::IsSimpleEnum cache validity
+  // checks. If we cannot guarantee cache validity, call the runtime
+  // system to check cache validity or get the property names in a
+  // fixed array.
+
+  // Get the set of properties to enumerate.
+  __ push(r0);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ mov(r2, r0);
+  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r1, ip);
+  __ b(ne, &fixed_array);
+
+  // We got a map in register r0. Get the enumeration cache from it.
+  __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset));
+  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
+  __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ push(r0);  // Map.
+  __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ mov(r1, Operand(r1, LSL, kSmiTagSize));
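+  // (The fixed array length is stored untagged at this point; the shift
+  // retags it as a smi.)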
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  // Push enumeration cache, enumeration cache length (as smi) and zero.
+  __ Push(r2, r1, r0);
+  __ jmp(&loop);
+
+  // We got a fixed array in register r0. Iterate through that.
+  __ bind(&fixed_array);
+  __ mov(r1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ Push(r1, r0);
+  __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
+  __ mov(r1, Operand(r1, LSL, kSmiTagSize));
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ Push(r1, r0);  // Fixed array length (as smi) and initial index.
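+
+  // The stack now holds, from top to bottom (offsets match the loads in
+  // the loop below):
+  //   sp[0] : current index (smi)
+  //   sp[1] : array length (smi)
+  //   sp[2] : fixed array of keys (or the enum cache)
+  //   sp[3] : expected map (0 forces the slow, filtering path)
+  //   sp[4] : the enumerable object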
+
+  // Generate code for doing the condition check.
+  __ bind(&loop);
+  // Load the current count to r0, load the length to r1.
+  __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
+  __ cmp(r0, r1);  // Compare to the array length.
+  __ b(hs, loop_statement.break_target());
+
+  // Get the current entry of the array into register r3.
+  __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case into register r2.
+  __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  Label update_each;
+  __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
+  __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r4, Operand(r2));
+  __ b(eq, &update_each);
+
+  // Convert the entry to a string or null if it isn't a property
+  // anymore. If the property has been removed while iterating, we
+  // just skip it.
+  __ push(r1);  // Enumerable.
+  __ push(r3);  // Current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
+  __ mov(r3, Operand(r0));
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r3, ip);
+  __ b(eq, loop_statement.continue_target());
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register r3.
+  __ bind(&update_each);
+  __ mov(result_register(), r3);
+  // Perform the assignment as if via '='.
+  EmitAssignment(stmt->each());
+
+  // Generate code for the body of the loop.
+  Label stack_limit_hit, stack_check_done;
+  Visit(stmt->body());
+
+  __ StackLimitCheck(&stack_limit_hit);
+  __ bind(&stack_check_done);
+
+  // Generate code for going to the next element by incrementing
+  // the index (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_target());
+  __ pop(r0);
+  __ add(r0, r0, Operand(Smi::FromInt(1)));
+  __ push(r0);
+  __ b(&loop);
+
+  // Slow case for the stack limit check.
+  StackCheckStub stack_check_stub;
+  __ bind(&stack_limit_hit);
+  __ CallStub(&stack_check_stub);
+  __ b(&stack_check_done);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_target());
+  __ Drop(5);
+
+  // Exit and decrement the loop depth.
+  __ bind(&exit);
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  if (scope()->is_function_scope() && info->num_literals() == 0) {
+    FastNewClosureStub stub;
+    __ mov(r0, Operand(info));
+    __ push(r0);
+    __ CallStub(&stub);
+  } else {
+    __ mov(r0, Operand(info));
+    __ Push(cp, r0);
+    __ CallRuntime(Runtime::kNewClosure, 2);
+  }
   Apply(context_, r0);
 }
 
@@ -695,18 +1039,17 @@
   if (var->is_global() && !var->is_this()) {
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in r2 and the global
-    // object on the stack.
+    // object (receiver) in r0.
     __ ldr(r0, CodeGenerator::GlobalObject());
-    __ push(r0);
     __ mov(r2, Operand(var->name()));
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    DropAndApply(1, context, r0);
+    Apply(context, r0);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     Comment cmnt(masm_, "Lookup slot");
     __ mov(r1, Operand(var->name()));
-    __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
+    __ Push(cp, r1);  // Context and name.
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     Apply(context, r0);
 
@@ -714,8 +1057,21 @@
     Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
                             ? "Context slot"
                             : "Stack slot");
-    Apply(context, slot);
-
+    if (var->mode() == Variable::CONST) {
+      // Constants may be the hole value if they have not been initialized.
+      // Unhole them.
+      Label done;
+      MemOperand slot_operand = EmitSlotSearch(slot, r0);
+      __ ldr(r0, slot_operand);
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+      __ cmp(r0, ip);
+      __ b(ne, &done);
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+      __ bind(&done);
+      Apply(context, r0);
+    } else {
+      Apply(context, slot);
+    }
   } else {
     Comment cmnt(masm_, "Rewritten parameter");
     ASSERT_NOT_NULL(property);
@@ -851,6 +1207,10 @@
 
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
@@ -858,16 +1218,18 @@
   __ Push(r3, r2, r1);
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else {
+  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(length);
+    __ CallStub(&stub);
   }
 
   bool result_saved = false;  // Is the result saved to the stack?
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  ZoneList<Expression*>* subexprs = expr->values();
-  for (int i = 0, len = subexprs->length(); i < len; i++) {
+  for (int i = 0; i < length; i++) {
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
@@ -904,7 +1266,13 @@
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
   Comment cmnt(masm_, "[ Assignment");
-  ASSERT(expr->op() != Token::INIT_CONST);
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // on the left-hand side.
+  if (!expr->target()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->target());
+    return;
+  }
+
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -984,6 +1352,7 @@
   switch (assign_type) {
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op(),
                              context_);
       break;
     case NAMED_PROPERTY:
@@ -1000,7 +1369,7 @@
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
   __ mov(r2, Operand(key->handle()));
-  __ ldr(r0, MemOperand(sp, 0));
+  // Call the load IC.  It expects the receiver in r0 and the property
+  // name in r2.
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 }
@@ -1023,15 +1392,64 @@
 }
 
 
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+  // Invalid left-hand sides are rewritten to have a 'throw
+  // ReferenceError' on the left-hand side.
+  if (!expr->IsValidLeftHandSide()) {
+    VisitForEffect(expr);
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->AsProperty();
+  if (prop != NULL) {
+    assign_type = (prop->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(r0);  // Preserve value.
+      VisitForValue(prop->obj(), kAccumulator);
+      __ mov(r1, r0);
+      __ pop(r0);  // Restore value.
+      __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+      __ Call(ic, RelocInfo::CODE_TARGET);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(r0);  // Preserve value.
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kAccumulator);
+      __ mov(r1, r0);
+      __ pop(r2);
+      __ pop(r0);  // Restore value.
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      __ Call(ic, RelocInfo::CODE_TARGET);
+      break;
+    }
+  }
+}
+
+
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+                                               Token::Value op,
                                                Expression::Context context) {
-  // Three main cases: global variables, lookup slots, and all other
-  // types of slots.  Left-hand-side parameters that rewrite to
-  // explicit property accesses do not reach here.
+  // Left-hand sides that rewrite to explicit property accesses do not reach
+  // here.
   ASSERT(var != NULL);
   ASSERT(var->is_global() || var->slot() != NULL);
 
-  Slot* slot = var->slot();
   if (var->is_global()) {
     ASSERT(!var->is_this());
     // Assignment to a global variable.  Use inline caching for the
@@ -1042,43 +1460,61 @@
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
 
-  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    __ push(result_register());  // Value.
-    __ mov(r1, Operand(var->name()));
-    __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
-    __ CallRuntime(Runtime::kStoreContextSlot, 3);
-
-  } else if (var->slot() != NULL) {
+  } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
+    // Perform the assignment for non-const variables and for initialization
+    // of const variables.  Const assignments are simply skipped.
+    Label done;
     Slot* slot = var->slot();
     switch (slot->type()) {
-      case Slot::LOCAL:
       case Slot::PARAMETER:
+      case Slot::LOCAL:
+        if (op == Token::INIT_CONST) {
+          // Detect const reinitialization by checking for the hole value.
+          __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ cmp(r1, ip);
+          __ b(ne, &done);
+        }
+        // Perform the assignment.
         __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
         break;
 
       case Slot::CONTEXT: {
         MemOperand target = EmitSlotSearch(slot, r1);
+        if (op == Token::INIT_CONST) {
+          // Detect const reinitialization by checking for the hole value.
+          __ ldr(r2, target);
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ cmp(r2, ip);
+          __ b(ne, &done);
+        }
+        // Perform the assignment and issue the write barrier.
         __ str(result_register(), target);
-
         // RecordWrite may destroy all its register arguments.
         __ mov(r3, result_register());
         int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-
         __ mov(r2, Operand(offset));
         __ RecordWrite(r1, r2, r3);
         break;
       }
 
       case Slot::LOOKUP:
-        UNREACHABLE();
+        // Call the runtime for the assignment.  The runtime will ignore
+        // const reinitialization.
+        __ push(r0);  // Value.
+        __ mov(r0, Operand(slot->var()->name()));
+        __ Push(cp, r0);  // Context and name.
+        if (op == Token::INIT_CONST) {
+          // The runtime will ignore const redeclaration.
+          __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+        } else {
+          __ CallRuntime(Runtime::kStoreContextSlot, 3);
+        }
         break;
     }
-
-  } else {
-    // Variables rewritten as properties are not treated as variables in
-    // assignments.
-    UNREACHABLE();
+    __ bind(&done);
   }
+
   Apply(context, result_register());
 }
 
@@ -1103,6 +1539,8 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+  // Load the receiver into r1.  Leave a copy on the stack if it is needed
+  // later for reverting the receiver to fast case.
   if (expr->ends_initialization_block()) {
     __ ldr(r1, MemOperand(sp));
   } else {
@@ -1115,7 +1553,8 @@
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
     __ push(r0);  // Result of assignment, saved even if not needed.
-    __ ldr(ip, MemOperand(sp, kPointerSize));  // Receiver is under value.
+    // Receiver is under the result value.
+    __ ldr(ip, MemOperand(sp, kPointerSize));
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
@@ -1143,21 +1582,30 @@
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
+  __ pop(r1);  // Key.
+  // Load the receiver into r2.  Leave a copy on the stack if it is needed
+  // later for reverting the receiver to fast case.
+  if (expr->ends_initialization_block()) {
+    __ ldr(r2, MemOperand(sp));
+  } else {
+    __ pop(r2);
+  }
+
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
     __ push(r0);  // Result of assignment, saved even if not needed.
-    // Receiver is under the key and value.
-    __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+    // Receiver is under the result value.
+    __ ldr(ip, MemOperand(sp, kPointerSize));
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
+    DropAndApply(1, context_, r0);
+  } else {
+    Apply(context_, r0);
   }
-
-  // Receiver and key are still on stack.
-  DropAndApply(2, context_, r0);
 }
 
 
@@ -1165,14 +1613,12 @@
   Comment cmnt(masm_, "[ Property");
   Expression* key = expr->key();
 
-  // Evaluate receiver.
-  VisitForValue(expr->obj(), kStack);
-
   if (key->IsPropertyName()) {
+    VisitForValue(expr->obj(), kAccumulator);
     EmitNamedPropertyLoad(expr);
-    // Drop receiver left on the stack by IC.
-    DropAndApply(1, context_, r0);
+    Apply(context_, r0);
   } else {
+    VisitForValue(expr->obj(), kStack);
     VisitForValue(expr->key(), kAccumulator);
     __ pop(r1);
     EmitKeyedPropertyLoad(expr);
@@ -1211,7 +1657,8 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-  CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -1225,8 +1672,51 @@
   Variable* var = fun->AsVariableProxy()->AsVariable();
 
   if (var != NULL && var->is_possibly_eval()) {
-    // Call to the identifier 'eval'.
-    UNREACHABLE();
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    VisitForValue(fun, kStack);
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ push(r2);  // Reserved receiver slot.
+
+    // Push the arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      VisitForValue(args->at(i), kStack);
+    }
+
+    // Push copy of the function - found below the arguments.
+    __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ push(r1);
+
+    // Push copy of the first argument or undefined if it doesn't exist.
+    if (arg_count > 0) {
+      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+      __ push(r1);
+    } else {
+      __ push(r2);
+    }
+
+    // Push the receiver of the enclosing function and do runtime call.
+    __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+    __ push(r1);
+    __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+    // The runtime call returns a pair of values in r0 (function) and
+    // r1 (receiver). Touch up the stack with the right values.
+    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    __ CallStub(&stub);
+    // Restore context register.
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    DropAndApply(1, context_, r0);
   } else if (var != NULL && !var->is_this() && var->is_global()) {
     // Push global object as receiver for the call IC.
     __ ldr(r0, CodeGenerator::GlobalObject());
@@ -1234,8 +1724,16 @@
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
-    // Call to a lookup slot.
-    UNREACHABLE();
+    // Call to a lookup slot (dynamically introduced variable).  Call the
+    // runtime to find the function to call (returned in r0) and the object
+    // holding it (returned in r1).
+    __ push(context_register());
+    __ mov(r2, Operand(var->name()));
+    __ push(r2);
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ push(r0);  // Function.
+    __ push(r1);  // Receiver.
+    EmitCallWithStub(expr);
   } else if (fun->AsProperty() != NULL) {
     // Call to an object property.
     Property* prop = fun->AsProperty();
@@ -1331,7 +1829,720 @@
 }
 
 
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (strcmp("_IsSmi", *name->ToCString()) == 0) {
+    EmitIsSmi(expr->arguments());
+  } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
+    EmitIsNonNegativeSmi(expr->arguments());
+  } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
+    EmitIsObject(expr->arguments());
+  } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
+    EmitIsUndetectableObject(expr->arguments());
+  } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
+    EmitIsFunction(expr->arguments());
+  } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
+    EmitIsArray(expr->arguments());
+  } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
+    EmitIsRegExp(expr->arguments());
+  } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
+    EmitIsConstructCall(expr->arguments());
+  } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
+    EmitObjectEquals(expr->arguments());
+  } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
+    EmitArguments(expr->arguments());
+  } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
+    EmitArgumentsLength(expr->arguments());
+  } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
+    EmitClassOf(expr->arguments());
+  } else if (strcmp("_Log", *name->ToCString()) == 0) {
+    EmitLog(expr->arguments());
+  } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
+    EmitRandomHeapNumber(expr->arguments());
+  } else if (strcmp("_SubString", *name->ToCString()) == 0) {
+    EmitSubString(expr->arguments());
+  } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
+    EmitRegExpExec(expr->arguments());
+  } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
+    EmitValueOf(expr->arguments());
+  } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
+    EmitSetValueOf(expr->arguments());
+  } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
+    EmitNumberToString(expr->arguments());
+  } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
+    EmitCharFromCode(expr->arguments());
+  } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
+    EmitFastCharCodeAt(expr->arguments());
+  } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
+    EmitStringAdd(expr->arguments());
+  } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
+    EmitStringCompare(expr->arguments());
+  } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
+    EmitMathPow(expr->arguments());
+  } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
+    EmitMathSin(expr->arguments());
+  } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
+    EmitMathCos(expr->arguments());
+  } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
+    EmitMathSqrt(expr->arguments());
+  } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
+    EmitCallFunction(expr->arguments());
+  } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
+    EmitRegExpConstructResult(expr->arguments());
+  } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
+    EmitSwapElements(expr->arguments());
+  } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
+    EmitGetFromCache(expr->arguments());
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
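+  // A non-negative smi has both the smi tag bit and the sign bit clear, so
+  // both can be tested with a single mask.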
+  __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+  __ BranchOnSmi(r0, if_false);
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, if_true);
+  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  __ b(ne, if_false);
+  __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+  __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+  __ b(lt, if_false);
+  __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(le, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  __ b(ne, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  // Get the frame pointer for the calling frame.
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &check_frame_marker);
+  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+  __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ pop(r1);
+  __ cmp(r0, r1);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in r1 and the formal
+  // parameter count in r0.
+  VisitForValue(args->at(0), kAccumulator);
+  __ mov(r1, r0);
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label exit;
+  // Get the number of formal parameters.
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  // If the object is a smi, we return null.
+  __ BranchOnSmi(r0, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);  // Map is now in r0.
+  __ b(lt, &null);
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+  __ b(eq, &function);
+
+  // Check if the constructor in the map is a function.
+  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
+  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  __ b(ne, &non_function_constructor);
+
+  // r0 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ b(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
+  __ jmp(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ LoadRoot(r0, Heap::kObject_symbolRootIndex);
+  __ jmp(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(r0, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+    VisitForValue(args->at(1), kStack);
+    VisitForValue(args->at(2), kStack);
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+
+  __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
+
+  __ bind(&slow_allocate_heapnumber);
+  // To allocate a heap number, and ensure that it is not a smi, we
+  // call the runtime function FUnaryMinus on 0, returning the double
+  // -0.0. A new, distinct heap number is returned each time.
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ push(r0);
+  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+  __ mov(r4, Operand(r0));
+
+  __ bind(&heapnumber_allocated);
+
+  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
+  // by computing:
+  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
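+  // (0x413 is the biased exponent 1043 = 1023 + 20; the 32 random bits fill
+  // the low mantissa word, and the subtraction leaves exactly
+  // 0.(32 random bits).)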
+  if (CpuFeatures::IsSupported(VFP3)) {
+    __ PrepareCallCFunction(0, r1);
+    __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+    CpuFeatures::Scope scope(VFP3);
+    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+    // Create this constant using mov/orr to avoid PC relative load.
+    __ mov(r1, Operand(0x41000000));
+    __ orr(r1, r1, Operand(0x300000));
+    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
+    __ vmov(d7, r0, r1);
+    // Move 0x4130000000000000 to VFP.
+    __ mov(r0, Operand(0));
+    __ vmov(d8, r0, r1);
+    // Subtract and store the result in the heap number.
+    __ vsub(d7, d7, d8);
+    __ sub(r0, r4, Operand(kHeapObjectTag));
+    __ vstr(d7, r0, HeapNumber::kValueOffset);
+    __ mov(r0, r4);
+  } else {
+    __ mov(r0, Operand(r4));
+    __ PrepareCallCFunction(1, r1);
+    __ CallCFunction(
+        ExternalReference::fill_heap_number_with_random_function(), 1);
+  }
+
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub;
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub;
+  ASSERT(args->length() == 4);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  VisitForValue(args->at(3), kStack);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ BranchOnSmi(r0, &done);
+  // If the object is not a value type, return the object.
+  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
+  __ b(ne, &done);
+  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the runtime function.
+  ASSERT(args->length() == 2);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  __ CallRuntime(Runtime::kMath_pow, 2);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  VisitForValue(args->at(0), kStack);  // Load the object.
+  VisitForValue(args->at(1), kAccumulator);  // Load the value.
+  __ pop(r1);  // r0 = value. r1 = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ BranchOnSmi(r1, &done);
+
+  // If the object is not a value type, return the value.
+  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
+  __ b(ne, &done);
+
+  // Store the value.
+  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
+  __ RecordWrite(r1, r2, r3);
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  VisitForValue(args->at(0), kStack);
+
+  NumberToStringStub stub;
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label slow_case, done;
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiShiftSize == 0);
+  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
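+  // A single test rejects both non-smis (tag bit) and char codes above
+  // String::kMaxAsciiCharCode (the inverted, shifted mask).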
+  __ tst(r0, Operand(kSmiTagMask |
+                       ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+  __ b(nz, &slow_case);
+  __ mov(r1, Operand(Factory::single_character_string_cache()));
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiTagSize == 1);
+  ASSERT(kSmiShiftSize == 0);
+  // At this point r0 contains the smi-tagged ASCII char code.
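+  // A smi is the char code shifted left by one bit; one more left shift
+  // turns it into a byte offset into the cache (code * kPointerSize).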
+  __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+  __ cmp(r1, r2);
+  __ b(eq, &slow_case);
+  __ mov(r0, r1);
+  __ b(&done);
+
+  __ bind(&slow_case);
+  __ push(r0);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
+  // TODO(fsc): Port the complete implementation from the classic back-end.
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringCompareStub stub;
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_sin, 1);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_cos, 1);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime function.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_sqrt, 1);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // For receiver and function.
+  VisitForValue(args->at(0), kStack);  // Receiver.
+  for (int i = 0; i < arg_count; i++) {
+    VisitForValue(args->at(i + 1), kStack);
+  }
+  VisitForValue(args->at(arg_count + 1), kAccumulator);  // Function.
+
+  // InvokeFunction requires function in r1. Move it in there.
+  if (!result_register().is(r1)) __ mov(r1, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(r1, count, CALL_FUNCTION);
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      Top::global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    Apply(context_, r0);
+    return;
+  }
+
+  VisitForValue(args->at(1), kAccumulator);
+
+  Register key = r0;
+  Register cache = r1;
+  __ ldr(cache, CodeGenerator::ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+  __ ldr(cache,
+         CodeGenerator::ContextOperand(
+             cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ ldr(cache,
+         FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+  Label done, not_found;
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // r2 now holds finger offset as a smi.
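+  // The cache is a fixed array of (key, value) pairs; the finger is the
+  // offset of the key of the most recently used pair.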
+  __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // r3 now points to the start of fixed array elements.
+  __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+  // Note side effect of PreIndex: r3 now points to the key of the pair.
+  __ cmp(key, r2);
+  __ b(ne, &not_found);
+
+  __ ldr(r0, MemOperand(r3, kPointerSize));
+  __ b(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ Push(cache, key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (name->length() > 0 && name->Get(0) == '_') {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
 
@@ -1366,6 +2577,49 @@
 
 void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* prop = expr->expression()->AsProperty();
+      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+      if (prop == NULL && var == NULL) {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        Apply(context_, true);
+      } else if (var != NULL &&
+                 !var->is_global() &&
+                 var->slot() != NULL &&
+                 var->slot()->type() != Slot::LOOKUP) {
+        // Result of deleting non-global, non-dynamic variables is false.
+        // The subexpression does not have side effects.
+        Apply(context_, false);
+      } else {
+        // Property or variable reference.  Call the delete builtin with
+        // object and property name as arguments.
+        if (prop != NULL) {
+          VisitForValue(prop->obj(), kStack);
+          VisitForValue(prop->key(), kStack);
+        } else if (var->is_global()) {
+          __ ldr(r1, CodeGenerator::GlobalObject());
+          __ mov(r0, Operand(var->name()));
+          __ Push(r1, r0);
+        } else {
+          // Non-global variable.  Call the runtime to look up the context
+          // where the variable was introduced.
+          __ push(context_register());
+          __ mov(r2, Operand(var->name()));
+          __ push(r2);
+          __ CallRuntime(Runtime::kLookupContext, 2);
+          __ push(r0);
+          __ mov(r2, Operand(var->name()));
+          __ push(r2);
+        }
+        __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+        Apply(context_, r0);
+      }
+      break;
+    }
+
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
       VisitForEffect(expr->expression());
@@ -1406,33 +2660,15 @@
 
     case Token::NOT: {
       Comment cmnt(masm_, "[ UnaryOperation (NOT)");
-      Label materialize_true, materialize_false, done;
-      // Initially assume a pure test context.  Notice that the labels are
-      // swapped.
-      Label* if_true = false_label_;
-      Label* if_false = true_label_;
-      switch (context_) {
-        case Expression::kUninitialized:
-          UNREACHABLE();
-          break;
-        case Expression::kEffect:
-          if_true = &done;
-          if_false = &done;
-          break;
-        case Expression::kValue:
-          if_true = &materialize_false;
-          if_false = &materialize_true;
-          break;
-        case Expression::kTest:
-          break;
-        case Expression::kValueTest:
-          if_false = &materialize_true;
-          break;
-        case Expression::kTestValue:
-          if_true = &materialize_false;
-          break;
-      }
+      Label materialize_true, materialize_false;
+      Label* if_true = NULL;
+      Label* if_false = NULL;
+
+      // Notice that the labels are swapped.
+      PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
+
       VisitForControl(expr->expression(), if_true, if_false);
+
       Apply(context_, if_false, if_true);  // Labels swapped.
       break;
     }
@@ -1445,18 +2681,17 @@
           proxy->var()->is_global()) {
         Comment cmnt(masm_, "Global variable");
         __ ldr(r0, CodeGenerator::GlobalObject());
-        __ push(r0);
         __ mov(r2, Operand(proxy->name()));
         Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
         // Use a regular load, not a contextual load, to avoid a reference
         // error.
         __ Call(ic, RelocInfo::CODE_TARGET);
-        __ str(r0, MemOperand(sp));
+        __ push(r0);
       } else if (proxy != NULL &&
                  proxy->var()->slot() != NULL &&
                  proxy->var()->slot()->type() == Slot::LOOKUP) {
         __ mov(r0, Operand(proxy->name()));
-        __ stm(db_w, sp, cp.bit() | r0.bit());
+        __ Push(cp, r0);
         __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
         __ push(r0);
       } else {
@@ -1507,8 +2742,7 @@
       VisitForValue(expr->expression(), kAccumulator);
       // Avoid calling the stub for Smis.
       Label smi, done;
-      __ tst(result_register(), Operand(kSmiTagMask));
-      __ b(eq, &smi);
+      __ BranchOnSmi(result_register(), &smi);
       // Non-smi: call stub leaving result in accumulator register.
       __ CallStub(&stub);
       __ b(&done);
@@ -1530,6 +2764,12 @@
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // as the left-hand side.
+  if (!expr->expression()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
 
   // Expression can only be a property, a global or a (parameter or local)
   // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
@@ -1557,10 +2797,13 @@
       __ mov(ip, Operand(Smi::FromInt(0)));
       __ push(ip);
     }
-    VisitForValue(prop->obj(), kStack);
     if (assign_type == NAMED_PROPERTY) {
+      // Put the object both on the stack and in the accumulator.
+      VisitForValue(prop->obj(), kAccumulator);
+      __ push(r0);
       EmitNamedPropertyLoad(prop);
     } else {
+      VisitForValue(prop->obj(), kStack);
       VisitForValue(prop->key(), kAccumulator);
       __ ldr(r1, MemOperand(sp, 0));
       __ push(r0);
@@ -1570,8 +2813,7 @@
 
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &no_conversion);
+  __ BranchOnSmi(r0, &no_conversion);
   __ push(r0);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
   __ bind(&no_conversion);
@@ -1615,8 +2857,7 @@
     __ b(vs, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &done);
+    __ BranchOnSmi(r0, &done);
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -1631,6 +2872,7 @@
     case VARIABLE:
       if (expr->is_postfix()) {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN,
                                Expression::kEffect);
         // For all contexts except kEffect: We have the result on
         // top of the stack.
@@ -1639,6 +2881,7 @@
         }
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN,
                                context_);
       }
       break;
@@ -1657,15 +2900,16 @@
       break;
     }
     case KEYED_PROPERTY: {
+      __ pop(r1);  // Key.
+      __ pop(r2);  // Receiver.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       __ Call(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
-        __ Drop(2);  // Result is on the stack under the key and the receiver.
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
         }
       } else {
-        DropAndApply(2, context_, r0);
+        Apply(context_, r0);
       }
       break;
     }
@@ -1708,36 +2952,41 @@
 }
 
 
+void FullCodeGenerator::EmitNullCompare(bool strict,
+                                        Register obj,
+                                        Register null_const,
+                                        Label* if_true,
+                                        Label* if_false,
+                                        Register scratch) {
+  __ cmp(obj, null_const);
+  if (strict) {
+    __ b(eq, if_true);
+  } else {
+    __ b(eq, if_true);
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(obj, ip);
+    __ b(eq, if_true);
+    __ BranchOnSmi(obj, if_false);
+    // It can be an undetectable object.
+    __ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
+    __ b(ne, if_true);
+  }
+  __ jmp(if_false);
+}
+
+
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-  Label materialize_true, materialize_false, done;
-  // Initially assume we are in a test context.
-  Label* if_true = true_label_;
-  Label* if_false = false_label_;
-  switch (context_) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-      break;
-    case Expression::kEffect:
-      if_true = &done;
-      if_false = &done;
-      break;
-    case Expression::kValue:
-      if_true = &materialize_true;
-      if_false = &materialize_false;
-      break;
-    case Expression::kTest:
-      break;
-    case Expression::kValueTest:
-      if_true = &materialize_true;
-      break;
-    case Expression::kTestValue:
-      if_false = &materialize_false;
-      break;
-  }
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
 
   VisitForValue(expr->left(), kStack);
   switch (expr->op()) {
@@ -1768,10 +3017,24 @@
         case Token::EQ_STRICT:
           strict = true;
           // Fall through
-        case Token::EQ:
+        case Token::EQ: {
           cc = eq;
           __ pop(r1);
+          // If either operand is constant null we do a fast compare
+          // against null.
+          Literal* right_literal = expr->right()->AsLiteral();
+          Literal* left_literal = expr->left()->AsLiteral();
+          if (right_literal != NULL && right_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, r1, r0, if_true, if_false, r2);
+            Apply(context_, if_true, if_false);
+            return;
+          } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, r0, r1, if_true, if_false, r2);
+            Apply(context_, if_true, if_false);
+            return;
+          }
           break;
+        }
         case Token::LT:
           cc = lt;
           __ pop(r1);
@@ -1802,8 +3065,7 @@
       // before it is called.
       Label slow_case;
       __ orr(r2, r0, Operand(r1));
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &slow_case);
+      __ BranchOnNotSmi(r2, &slow_case);
       __ cmp(r1, r0);
       __ b(cc, if_true);
       __ jmp(if_false);
@@ -1877,3 +3139,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index c308d69..ba318fd 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "assembler-arm.h"
 #include "codegen.h"
 #include "codegen-inl.h"
@@ -641,8 +643,8 @@
   // Patch the map check.
   Address ldr_map_instr_address =
       inline_end_address -
-      CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatchSize *
-      Assembler::kInstrSize;
+      (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch *
+      Assembler::kInstrSize);
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
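
Both inlined-load and inlined-store patching now derive the address of the map-check ldr the same way: step a named number of fixed-width ARM instructions back from the end of the inlined sequence. A sketch of the arithmetic, with illustrative names:

    // Every ARM instruction is kInstrSize (4) bytes wide, so the ldr that
    // loads the map sits at a fixed byte offset before the sequence end.
    typedef unsigned char* Address;

    Address MapCheckAddress(Address inline_end_address,
                            int instructions_after_patch) {
      const int kInstrSize = 4;  // Fixed ARM instruction width.
      return inline_end_address - instructions_after_patch * kInstrSize;
    }
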
@@ -672,7 +674,9 @@
 
   // Patch the map check.
   Address ldr_map_instr_address =
-      inline_end_address - 5 * Assembler::kInstrSize;
+      inline_end_address -
+      (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
+      Assembler::kInstrSize);
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
@@ -1207,13 +1211,13 @@
 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[1]  : receiver
   // -----------------------------------
 
-  __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ Push(r3, r2, r0);
+  // Push receiver, key and value for runtime call.
+  __ Push(r2, r1, r0);
 
   ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
   __ TailCallExternalReference(ref, 3, 1);
@@ -1223,12 +1227,13 @@
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[1]  : receiver
   // -----------------------------------
-  __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
-  __ Push(r3, r1, r0);
+
+  // Push receiver, key and value for runtime call.
+  __ Push(r2, r1, r0);
 
   __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
 }
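
In the generic stub below, the grown array length is computed as key + Smi::FromInt(1) without untagging either operand: with smi tag 0 in the low bit, adding two tagged smis is plain integer addition. A small model, assuming the standard 32-bit smi encoding:

    #include <cstdint>

    const int kSmiTagSize = 1;  // Smi tag 0 occupies the low bit.

    int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiToInt(int32_t smi) { return smi >> kSmiTagSize; }

    // Tagged addition: (a << 1) + (b << 1) == (a + b) << 1, so the sum of
    // two smis is already a valid smi and key + SmiFromInt(1) is key + 1.
    int32_t SmiAddOne(int32_t key) { return key + SmiFromInt(1); }
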
@@ -1237,147 +1242,135 @@
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[1]  : receiver
   // -----------------------------------
-  Label slow, fast, array, extra, exit, check_pixel_array;
+  Label slow, fast, array, extra, check_pixel_array;
 
-  // Get the key and the object from the stack.
-  __ ldm(ia, sp, r1.bit() | r3.bit());  // r1 = key, r3 = receiver
+  // Register usage.
+  Register value = r0;
+  Register key = r1;
+  Register receiver = r2;
+  Register elements = r3;  // Elements array of the receiver.
+  // r4 and r5 are used as general scratch registers.
+
   // Check that the key is a smi.
-  __ tst(r1, Operand(kSmiTagMask));
+  __ tst(key, Operand(kSmiTagMask));
   __ b(ne, &slow);
   // Check that the object isn't a smi.
-  __ tst(r3, Operand(kSmiTagMask));
+  __ tst(receiver, Operand(kSmiTagMask));
   __ b(eq, &slow);
   // Get the map of the object.
-  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ ldrb(ip, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
   __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &slow);
   // Check if the object is a JS array or not.
-  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  __ cmp(r2, Operand(JS_ARRAY_TYPE));
-  // r1 == key.
+  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ cmp(r4, Operand(JS_ARRAY_TYPE));
   __ b(eq, &array);
   // Check that the object is some kind of JS object.
-  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
   __ b(lt, &slow);
 
-
   // Object case: Check key against length in the elements array.
-  __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
-  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r2, ip);
+  __ cmp(r4, ip);
   __ b(ne, &check_pixel_array);
   // Untag the key (for checking against untagged length in the fixed array).
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+  __ mov(r4, Operand(key, ASR, kSmiTagSize));
   // Compute address to store into and check array bounds.
-  __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
-  __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset));
-  __ cmp(r1, Operand(ip));
+  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmp(r4, Operand(ip));
   __ b(lo, &fast);
 
-
-  // Slow case:
+  // Slow case: jump to the runtime.
   __ bind(&slow);
+  // Entry registers are intact.
+  // r0: value.
+  // r1: key.
+  // r2: receiver.
   GenerateRuntimeSetProperty(masm);
 
   // Check whether the elements is a pixel array.
-  // r0: value
-  // r1: index (as a smi), zero-extended.
-  // r3: elements array
+  // r4: elements map.
   __ bind(&check_pixel_array);
   __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
-  __ cmp(r2, ip);
+  __ cmp(r4, ip);
   __ b(ne, &slow);
   // Check that the value is a smi. If a conversion is needed call into the
   // runtime to convert and clamp.
-  __ BranchOnNotSmi(r0, &slow);
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // Untag the key.
-  __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset));
-  __ cmp(r1, Operand(ip));
+  __ BranchOnNotSmi(value, &slow);
+  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the key.
+  __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
+  __ cmp(r4, Operand(ip));
   __ b(hs, &slow);
-  __ mov(r4, r0);  // Save the value.
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));  // Untag the value.
+  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
   {  // Clamp the value to [0..255].
     Label done;
-    __ tst(r0, Operand(0xFFFFFF00));
+    __ tst(r5, Operand(0xFFFFFF00));
     __ b(eq, &done);
-    __ mov(r0, Operand(0), LeaveCC, mi);  // 0 if negative.
-    __ mov(r0, Operand(255), LeaveCC, pl);  // 255 if positive.
+    __ mov(r5, Operand(0), LeaveCC, mi);  // 0 if negative.
+    __ mov(r5, Operand(255), LeaveCC, pl);  // 255 if positive.
     __ bind(&done);
   }
-  __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset));
-  __ strb(r0, MemOperand(r2, r1));
-  __ mov(r0, Operand(r4));  // Return the original value.
+  // Get the pointer to the external array. This clobbers elements.
+  __ ldr(elements,
+         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+  __ strb(r5, MemOperand(elements, r4));  // Elements is now the external pointer.
   __ Ret();
 
-
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
   // element to the array by writing to array[array.length].
-  // r0 == value, r1 == key, r2 == elements, r3 == object
   __ bind(&extra);
-  __ b(ne, &slow);  // do not leave holes in the array
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // untag
-  __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
-  __ cmp(r1, Operand(ip));
+  // Condition code from comparing key and array length is still available.
+  __ b(ne, &slow);  // Only support writing to array[array.length].
+  // Check for room in the elements backing store.
+  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag key.
+  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmp(r4, Operand(ip));
   __ b(hs, &slow);
-  __ mov(r1, Operand(r1, LSL, kSmiTagSize));  // restore tag
-  __ add(r1, r1, Operand(1 << kSmiTagSize));  // and increment
-  __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
-  __ mov(r3, Operand(r2));
-  // NOTE: Computing the address to store into must take the fact
-  // that the key has been incremented into account.
-  int displacement = FixedArray::kHeaderSize - kHeapObjectTag -
-      ((1 << kSmiTagSize) * 2);
-  __ add(r2, r2, Operand(displacement));
-  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+  // Calculate key + 1 as smi.
+  ASSERT_EQ(0, kSmiTag);
+  __ add(r4, key, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
   __ b(&fast);
 
-
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode; if it is the
   // length is always a smi.
-  // r0 == value, r3 == object
   __ bind(&array);
-  __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
-  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r1, ip);
+  __ cmp(r4, ip);
   __ b(ne, &slow);
 
-  // Check the key against the length in the array, compute the
-  // address to store into and fall through to fast case.
-  __ ldr(r1, MemOperand(sp));  // restore key
-  // r0 == value, r1 == key, r2 == elements, r3 == object.
-  __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
-  __ cmp(r1, Operand(ip));
+  // Check the key against the length in the array.
+  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ cmp(key, Operand(ip));
   __ b(hs, &extra);
-  __ mov(r3, Operand(r2));
-  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+  // Fall through to fast case.
 
-
-  // Fast case: Do the store.
-  // r0 == value, r2 == address to store into, r3 == elements
   __ bind(&fast);
-  __ str(r0, MemOperand(r2));
+  // Fast case: store the value into the elements backing store.
+  __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(r5));
   // Skip write barrier if the written value is a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &exit);
+  __ tst(value, Operand(kSmiTagMask));
+  __ Ret(eq);
   // Update write barrier for the elements array address.
-  __ sub(r1, r2, Operand(r3));
-  __ RecordWrite(r3, r1, r2);
+  __ sub(r4, r5, Operand(elements));
+  __ RecordWrite(elements, r4, r5);
 
-  __ bind(&exit);
   __ Ret();
 }
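
The pixel-array path above clamps the untagged value to [0..255] using only the flags set by tst value, 0xFFFFFF00: if none of the high 24 bits are set the value is already in range; otherwise the sign bit selects 0 or 255. The same logic in plain C++ (an illustrative helper, not from the patch):

    #include <cstdint>

    // Mirrors the tst / conditional-mov sequence in the generated code.
    uint8_t ClampToPixel(int32_t value) {
      if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);
      return value < 0 ? 0 : 255;  // mi selects 0, pl selects 255.
    }
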
 
@@ -1471,20 +1464,23 @@
                                          ExternalArrayType array_type) {
   // ---------- S t a t e --------------
   //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[1]  : receiver
   // -----------------------------------
   Label slow, check_heap_number;
 
-  // Get the key and the object from the stack.
-  __ ldm(ia, sp, r1.bit() | r2.bit());  // r1 = key, r2 = receiver
+  // Register usage.
+  Register value = r0;
+  Register key = r1;
+  Register receiver = r2;
+  // r3 holds the elements array and, later, the destination external array.
 
   // Check that the object isn't a smi.
-  __ BranchOnSmi(r2, &slow);
+  __ BranchOnSmi(receiver, &slow);
 
-  // Check that the object is a JS object. Load map into r3
-  __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE);
+  // Check that the object is a JS object. Load map into r3.
+  __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
   __ b(le, &slow);
 
   // Check that the receiver does not require access checks.  We need
@@ -1494,73 +1490,70 @@
   __ b(ne, &slow);
 
   // Check that the key is a smi.
-  __ BranchOnNotSmi(r1, &slow);
+  __ BranchOnNotSmi(key, &slow);
 
-  // Check that the elements array is the appropriate type of
-  // ExternalArray.
-  // r0: value
-  // r1: index (smi)
-  // r2: object
-  __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  // Check that the elements array is the appropriate type of ExternalArray.
+  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
-  __ cmp(r3, ip);
+  __ cmp(r4, ip);
   __ b(ne, &slow);
 
   // Check that the index is in range.
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // Untag the index.
-  __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset));
-  __ cmp(r1, ip);
+  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the index.
+  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
+  __ cmp(r4, ip);
   // Unsigned comparison catches both negative and too-large values.
   __ b(hs, &slow);
 
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
-  // r0: value
-  // r1: index (integer)
-  // r2: array
-  __ BranchOnNotSmi(r0, &check_heap_number);
-  __ mov(r3, Operand(r0, ASR, kSmiTagSize));  // Untag the value.
-  __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
+  // r3: external array.
+  // r4: key (integer).
+  __ BranchOnNotSmi(value, &check_heap_number);
+  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
+  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
 
-  // r1: index (integer)
-  // r2: base pointer of external storage
-  // r3: value (integer)
+  // r3: base pointer of external storage.
+  // r4: key (integer).
+  // r5: value (integer).
   switch (array_type) {
     case kExternalByteArray:
     case kExternalUnsignedByteArray:
-      __ strb(r3, MemOperand(r2, r1, LSL, 0));
+      __ strb(r5, MemOperand(r3, r4, LSL, 0));
       break;
     case kExternalShortArray:
     case kExternalUnsignedShortArray:
-      __ strh(r3, MemOperand(r2, r1, LSL, 1));
+      __ strh(r5, MemOperand(r3, r4, LSL, 1));
       break;
     case kExternalIntArray:
     case kExternalUnsignedIntArray:
-      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      __ str(r5, MemOperand(r3, r4, LSL, 2));
       break;
     case kExternalFloatArray:
       // Need to perform int-to-float conversion.
-      ConvertIntToFloat(masm, r3, r4, r5, r6);
-      __ str(r4, MemOperand(r2, r1, LSL, 2));
+      ConvertIntToFloat(masm, r5, r6, r7, r9);
+      __ str(r6, MemOperand(r3, r4, LSL, 2));
       break;
     default:
       UNREACHABLE();
       break;
   }
 
-  // r0: value
+  // Entry registers are intact; r0 holds the value, which is the return value.
   __ Ret();
 
 
-  // r0: value
-  // r1: index (integer)
-  // r2: external array object
+  // r3: external array.
+  // r4: index (integer).
   __ bind(&check_heap_number);
-  __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE);
+  __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
   __ b(ne, &slow);
 
-  __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
+  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+
+  // r3: base pointer of external storage.
+  // r4: key (integer).
 
   // The WebGL specification leaves the behavior of storing NaN and
   // +/-Infinity into integer arrays basically undefined. For more
@@ -1570,13 +1563,13 @@
 
     // vldr requires the offset to be a multiple of 4, so we cannot
     // include -kHeapObjectTag in it.
-    __ sub(r3, r0, Operand(kHeapObjectTag));
-    __ vldr(d0, r3, HeapNumber::kValueOffset);
+    __ sub(r5, r0, Operand(kHeapObjectTag));
+    __ vldr(d0, r5, HeapNumber::kValueOffset);
 
     if (array_type == kExternalFloatArray) {
       __ vcvt_f32_f64(s0, d0);
-      __ vmov(r3, s0);
-      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      __ vmov(r5, s0);
+      __ str(r5, MemOperand(r3, r4, LSL, 2));
     } else {
       Label done;
 
@@ -1585,38 +1578,38 @@
       __ vcmp(d0, d0);
       // Move vector status bits to normal status bits.
       __ vmrs(v8::internal::pc);
-      __ mov(r3, Operand(0), LeaveCC, vs);  // NaN converts to 0
+      __ mov(r5, Operand(0), LeaveCC, vs);  // NaN converts to 0.
       __ b(vs, &done);
 
-      // Test whether exponent equal to 0x7FF (infinity or NaN)
-      __ vmov(r4, r3, d0);
+      // Test whether the exponent equals 0x7FF (infinity or NaN).
+      __ vmov(r6, r7, d0);
       __ mov(r5, Operand(0x7FF00000));
-      __ and_(r3, r3, Operand(r5));
-      __ teq(r3, Operand(r5));
-      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ and_(r6, r6, Operand(r5));
+      __ teq(r6, Operand(r5));
+      __ mov(r6, Operand(0), LeaveCC, eq);
 
-      // Not infinity or NaN simply convert to int
+      // Not infinity or NaN: simply convert to int.
       if (IsElementTypeSigned(array_type)) {
         __ vcvt_s32_f64(s0, d0, ne);
       } else {
         __ vcvt_u32_f64(s0, d0, ne);
       }
 
-      __ vmov(r3, s0, ne);
+      __ vmov(r5, s0, ne);
 
       __ bind(&done);
       switch (array_type) {
         case kExternalByteArray:
         case kExternalUnsignedByteArray:
-          __ strb(r3, MemOperand(r2, r1, LSL, 0));
+          __ strb(r5, MemOperand(r3, r4, LSL, 0));
           break;
         case kExternalShortArray:
         case kExternalUnsignedShortArray:
-          __ strh(r3, MemOperand(r2, r1, LSL, 1));
+          __ strh(r5, MemOperand(r3, r4, LSL, 1));
           break;
         case kExternalIntArray:
         case kExternalUnsignedIntArray:
-          __ str(r3, MemOperand(r2, r1, LSL, 2));
+          __ str(r5, MemOperand(r3, r4, LSL, 2));
           break;
         default:
           UNREACHABLE();
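
The VFP3 path relies on NaN being the only value that does not compare equal to itself: vcmp d0, d0 followed by vmrs moves the unordered flag into the CPU status bits, and the conditional mov stores 0. A simplified C++ model of the NaN handling (the saturating vcvt conversion is modelled with explicit clamping so the cast stays well defined):

    #include <cstdint>

    bool IsNaN(double value) { return value != value; }  // vcmp d0, d0.

    // Simplified model: NaN is stored as 0; other values go through the
    // saturating signed conversion, like vcvt_s32_f64.
    int32_t ConvertForIntArray(double value) {
      if (IsNaN(value)) return 0;
      if (value >= 2147483647.0) return INT32_MAX;
      if (value <= -2147483648.0) return INT32_MIN;
      return static_cast<int32_t>(value);
    }
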
@@ -1624,12 +1617,12 @@
       }
     }
 
-    // r0: original value
+    // Entry registers are intact; r0 holds the value, which is the return value.
     __ Ret();
   } else {
-    // VFP3 is not available do manual conversions
-    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-    __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+    // VFP3 is not available, so do manual conversions.
+    __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
+    __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
 
     if (array_type == kExternalFloatArray) {
       Label done, nan_or_infinity_or_zero;
@@ -1641,106 +1634,108 @@
 
       // Test for all special exponent values: zeros, subnormal numbers, NaNs
       // and infinities. All these should be converted to 0.
-      __ mov(r5, Operand(HeapNumber::kExponentMask));
-      __ and_(r6, r3, Operand(r5), SetCC);
+      __ mov(r7, Operand(HeapNumber::kExponentMask));
+      __ and_(r9, r5, Operand(r7), SetCC);
       __ b(eq, &nan_or_infinity_or_zero);
 
-      __ teq(r6, Operand(r5));
-      __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq);
+      __ teq(r9, Operand(r7));
+      __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
       __ b(eq, &nan_or_infinity_or_zero);
 
       // Rebias exponent.
-      __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
-      __ add(r6,
-             r6,
+      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+      __ add(r9,
+             r9,
              Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
 
-      __ cmp(r6, Operand(kBinary32MaxExponent));
-      __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt);
-      __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt);
+      __ cmp(r9, Operand(kBinary32MaxExponent));
+      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
+      __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
       __ b(gt, &done);
 
-      __ cmp(r6, Operand(kBinary32MinExponent));
-      __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt);
+      __ cmp(r9, Operand(kBinary32MinExponent));
+      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
       __ b(lt, &done);
 
-      __ and_(r7, r3, Operand(HeapNumber::kSignMask));
-      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
-      __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift));
-      __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift));
-      __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift));
+      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+      __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
+      __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
+      __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
 
       __ bind(&done);
-      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      __ str(r5, MemOperand(r3, r4, LSL, 2));
+      // Entry registers are intact; r0 holds the value, which is the return
+      // value.
       __ Ret();
 
       __ bind(&nan_or_infinity_or_zero);
-      __ and_(r7, r3, Operand(HeapNumber::kSignMask));
-      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
-      __ orr(r6, r6, r7);
-      __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift));
-      __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift));
+      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+      __ orr(r9, r9, r7);
+      __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
+      __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
       __ b(&done);
     } else {
-      bool is_signed_type  = IsElementTypeSigned(array_type);
+      bool is_signed_type = IsElementTypeSigned(array_type);
       int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
-      int32_t min_value    = is_signed_type ? 0x80000000 : 0x00000000;
+      int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
 
       Label done, sign;
 
       // Test for all special exponent values: zeros, subnormal numbers, NaNs
       // and infinities. All these should be converted to 0.
-      __ mov(r5, Operand(HeapNumber::kExponentMask));
-      __ and_(r6, r3, Operand(r5), SetCC);
-      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ mov(r7, Operand(HeapNumber::kExponentMask));
+      __ and_(r9, r5, Operand(r7), SetCC);
+      __ mov(r5, Operand(0), LeaveCC, eq);
       __ b(eq, &done);
 
-      __ teq(r6, Operand(r5));
-      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ teq(r9, Operand(r7));
+      __ mov(r5, Operand(0), LeaveCC, eq);
       __ b(eq, &done);
 
       // Unbias exponent.
-      __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
-      __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC);
+      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+      __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
       // If the exponent is negative, the result is 0.
-      __ mov(r3, Operand(0), LeaveCC, mi);
+      __ mov(r5, Operand(0), LeaveCC, mi);
       __ b(mi, &done);
 
-      // If exponent is too big than result is minimal value
-      __ cmp(r6, Operand(meaningfull_bits - 1));
-      __ mov(r3, Operand(min_value), LeaveCC, ge);
+      // If the exponent is too big, the result is the minimal value.
+      __ cmp(r9, Operand(meaningfull_bits - 1));
+      __ mov(r5, Operand(min_value), LeaveCC, ge);
       __ b(ge, &done);
 
-      __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC);
-      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
-      __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+      __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
+      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+      __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
 
-      __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
-      __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl);
+      __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+      __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
       __ b(pl, &sign);
 
-      __ rsb(r6, r6, Operand(0));
-      __ mov(r3, Operand(r3, LSL, r6));
-      __ rsb(r6, r6, Operand(meaningfull_bits));
-      __ orr(r3, r3, Operand(r4, LSR, r6));
+      __ rsb(r9, r9, Operand(0));
+      __ mov(r5, Operand(r5, LSL, r9));
+      __ rsb(r9, r9, Operand(meaningfull_bits));
+      __ orr(r5, r5, Operand(r6, LSR, r9));
 
       __ bind(&sign);
-      __ teq(r5, Operand(0));
-      __ rsb(r3, r3, Operand(0), LeaveCC, ne);
+      __ teq(r7, Operand(0));
+      __ rsb(r5, r5, Operand(0), LeaveCC, ne);
 
       __ bind(&done);
       switch (array_type) {
         case kExternalByteArray:
         case kExternalUnsignedByteArray:
-          __ strb(r3, MemOperand(r2, r1, LSL, 0));
+          __ strb(r5, MemOperand(r3, r4, LSL, 0));
           break;
         case kExternalShortArray:
         case kExternalUnsignedShortArray:
-          __ strh(r3, MemOperand(r2, r1, LSL, 1));
+          __ strh(r5, MemOperand(r3, r4, LSL, 1));
           break;
         case kExternalIntArray:
         case kExternalUnsignedIntArray:
-          __ str(r3, MemOperand(r2, r1, LSL, 2));
+          __ str(r5, MemOperand(r3, r4, LSL, 2));
           break;
         default:
           UNREACHABLE();
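
Without VFP3 the double-to-binary32 conversion is done by hand on the raw IEEE-754 words: the 11-bit exponent (bias 1023) is rebiased to 8 bits (bias 127), overflow saturates to infinity, underflow flushes to a signed zero, and the top 23 mantissa bits are repacked. A compact model of the same bit manipulation (rounding is ignored, as in the stub):

    #include <cstdint>
    #include <cstring>

    uint32_t DoubleToFloatBits(double value) {
      uint64_t d;
      std::memcpy(&d, &value, sizeof(d));
      uint32_t sign = static_cast<uint32_t>(d >> 63) << 31;
      int exponent = static_cast<int>((d >> 52) & 0x7FF);
      uint32_t mantissa = static_cast<uint32_t>((d >> 29) & 0x7FFFFF);
      if (exponent == 0 || exponent == 0x7FF) {
        // Zeros/subnormals keep sign and shifted mantissa; NaN/infinity
        // keep an all-ones exponent (the nan_or_infinity_or_zero path).
        return sign | (exponent == 0x7FF ? 0x7F800000u : 0u) | mantissa;
      }
      exponent += 127 - 1023;  // Rebias from double to binary32.
      if (exponent >= 0xFF) return sign | 0x7F800000u;  // Overflow: infinity.
      if (exponent <= 0) return sign;                   // Underflow: zero.
      return sign | (static_cast<uint32_t>(exponent) << 23) | mantissa;
    }
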
@@ -1751,6 +1746,11 @@
 
   // Slow case: call runtime.
   __ bind(&slow);
+
+  // Entry registers are intact.
+  // r0: value
+  // r1: key
+  // r2: receiver
   GenerateRuntimeSetProperty(masm);
 }
 
@@ -1841,3 +1841,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
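
The recurring change across ic-arm.cc is the new KeyedStoreIC calling convention: the operands now arrive in registers instead of on the stack and are only pushed when handing off to the runtime. Summarized from the state comments above:

    // KeyedStoreIC register convention after this change:
    //   r0 : value     (also the return value)
    //   r1 : key
    //   r2 : receiver
    //   lr : return address
    // Runtime calls re-push them in (receiver, key, value) order:
    //   __ Push(r2, r1, r0);
    //   __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
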
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index 8d182be..3c43d16 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
@@ -136,3 +138,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
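
Each ARM source file is now wrapped so that it compiles to an (almost) empty translation unit on non-ARM targets, letting the build pass every architecture file to the compiler unconditionally. The pattern, sketched:

    #include "v8.h"  // Defines V8_TARGET_ARCH_ARM for ARM builds.

    #if defined(V8_TARGET_ARCH_ARM)

    namespace v8 {
    namespace internal {
    // ... architecture-specific implementation ...
    } }  // namespace v8::internal

    #endif  // V8_TARGET_ARCH_ARM
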
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index c4b153f..29c48a4 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "debug.h"
@@ -352,6 +354,51 @@
 }
 
 
+void MacroAssembler::Ldrd(Register dst1, Register dst2,
+                          const MemOperand& src, Condition cond) {
+  ASSERT(src.rm().is(no_reg));
+  ASSERT(!dst1.is(lr));  // r14.
+  ASSERT_EQ(0, dst1.code() % 2);
+  ASSERT_EQ(dst1.code() + 1, dst2.code());
+
+  // Generate two ldr instructions if ldrd is not available.
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatures::Scope scope(ARMv7);
+    ldrd(dst1, dst2, src, cond);
+  } else {
+    MemOperand src2(src);
+    src2.set_offset(src2.offset() + 4);
+    if (dst1.is(src.rn())) {
+      ldr(dst2, src2, cond);
+      ldr(dst1, src, cond);
+    } else {
+      ldr(dst1, src, cond);
+      ldr(dst2, src2, cond);
+    }
+  }
+}
+
+
+void MacroAssembler::Strd(Register src1, Register src2,
+                          const MemOperand& dst, Condition cond) {
+  ASSERT(dst.rm().is(no_reg));
+  ASSERT(!src1.is(lr));  // r14.
+  ASSERT_EQ(0, src1.code() % 2);
+  ASSERT_EQ(src1.code() + 1, src2.code());
+
+  // Generate two str instructions if strd is not available.
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatures::Scope scope(ARMv7);
+    strd(src1, src2, dst, cond);
+  } else {
+    MemOperand dst2(dst);
+    dst2.set_offset(dst2.offset() + 4);
+    str(src1, dst, cond);
+    str(src2, dst2, cond);
+  }
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   // r0-r3: preserved
   stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
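
The interesting case in the Ldrd fallback is when dst1 aliases the base register: loading dst1 first would clobber the address needed for the second load, so the loads are issued in the opposite order (Strd has no such hazard, since stores do not modify their source registers). A C-level analogue of that check:

    #include <cstdint>

    // Analogue of the two-ldr fallback in MacroAssembler::Ldrd.
    void LoadPair(const uint32_t* base, uint32_t& dst1, uint32_t& dst2,
                  bool dst1_aliases_base) {
      if (dst1_aliases_base) {
        dst2 = base[1];  // Load the second word first...
        dst1 = base[0];  // ...so the address survives until the last load.
      } else {
        dst1 = base[0];
        dst2 = base[1];
      }
    }
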
@@ -1725,3 +1772,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 9cf93da..494f2b6 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -185,6 +185,18 @@
     }
   }
 
+  // Load two consecutive registers from two consecutive memory locations.
+  void Ldrd(Register dst1,
+            Register dst2,
+            const MemOperand& src,
+            Condition cond = al);
+
+  // Store two consecutive registers to two consecutive memory locations.
+  void Strd(Register src1,
+            Register src2,
+            const MemOperand& dst,
+            Condition cond = al);
+
   // ---------------------------------------------------------------------------
   // Stack limit support
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 64fe5d6..e8910f4 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -26,6 +26,9 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "unicode.h"
 #include "log.h"
 #include "ast.h"
@@ -1255,3 +1258,5 @@
 #endif  // V8_INTERPRETED_REGEXP
 
 }}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/register-allocator-arm.cc b/src/arm/register-allocator-arm.cc
index ad0c7f9..3b35574 100644
--- a/src/arm/register-allocator-arm.cc
+++ b/src/arm/register-allocator-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 
@@ -57,3 +59,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index e4601f3..e72a879 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -29,6 +29,8 @@
 #include <cstdarg>
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "disasm.h"
 #include "assembler.h"
 #include "arm/constants-arm.h"
@@ -2731,3 +2733,5 @@
 } }  // namespace assembler::arm
 
 #endif  // __arm__
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 877354c..d82ef21 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "ic-inl.h"
 #include "codegen-inl.h"
 #include "stub-cache.h"
@@ -434,7 +436,7 @@
                         Register holder,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
@@ -454,7 +456,8 @@
     }
 
     if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
+                     miss_label);
       return;
     }
 
@@ -464,14 +467,18 @@
     __ push(receiver);
     __ Push(holder, name_);
 
+    // Invoke the interceptor.  Note: the map checks from the receiver to
+    // the interceptor's holder have been compiled before (see the caller
+    // of this method).
     CompileCallLoadPropertyWithInterceptor(masm,
                                            receiver,
                                            holder,
                                            name_,
-                                           holder_obj);
+                                           interceptor_holder);
 
+    // Check if the interceptor provided a value for the property.  If so,
+    // return immediately.
     Label interceptor_failed;
-    // Compare with no_interceptor_result_sentinel.
     __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
     __ cmp(r0, scratch1);
     __ b(eq, &interceptor_failed);
@@ -486,13 +493,17 @@
     __ LeaveInternalFrame();
 
     if (lookup->type() == FIELD) {
-      holder = stub_compiler->CheckPrototypes(holder_obj,
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder.  Check that the maps from the interceptor's
+      // holder to the field's holder haven't changed...
+      holder = stub_compiler->CheckPrototypes(interceptor_holder,
                                               holder,
                                               lookup->holder(),
                                               scratch1,
                                               scratch2,
                                               name,
                                               miss_label);
+      // ... and retrieve the field from the field's holder.
       stub_compiler->GenerateFastPropertyLoad(masm,
                                               r0,
                                               holder,
@@ -500,35 +511,40 @@
                                               lookup->GetFieldIndex());
       __ Ret();
     } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
       ASSERT(lookup->type() == CALLBACKS);
       ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
       ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
+      // Prepare for tail call: push the receiver onto the stack.
       Label cleanup;
-      __ pop(scratch2);
-      __ Push(receiver, scratch2);
+      __ push(receiver);
 
-      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+      // Check that the maps from the interceptor's holder to the callback's
+      // holder haven't changed.
+      holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
                                               lookup->holder(), scratch1,
                                               scratch2,
                                               name,
                                               &cleanup);
 
+      // Continue tail call preparation: push remaining parameters.
       __ push(holder);
       __ Move(holder, Handle<AccessorInfo>(callback));
       __ push(holder);
       __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
       __ Push(scratch1, name_);
 
+      // Tail call to runtime.
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
       __ TailCallExternalReference(ref, 5, 1);
 
+      // Cleanup code: we pushed the receiver and need to remove it.
       __ bind(&cleanup);
-      __ pop(scratch1);
       __ pop(scratch2);
-      __ push(scratch1);
     }
   }
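
The load-interceptor path above is driven by a sentinel value: the interceptor either produces a result, which is returned immediately, or the no-interceptor-result sentinel, in which case the stub loads the property found by the earlier lookup (a field or an accessor callback). A high-level model (all names are illustrative stand-ins for V8 internals):

    #include <cstdint>

    using Object = intptr_t;
    const Object kNoInterceptorResultSentinel = -1;

    Object LoadWithInterceptor(Object (*interceptor)(Object),
                               Object (*lookup_fallback)(Object),
                               Object receiver) {
      Object result = interceptor(receiver);
      if (result != kNoInterceptorResultSentinel) return result;  // Fast exit.
      // The interceptor produced nothing: fall back to the property that
      // the side lookup found behind it.
      return lookup_fallback(receiver);
    }
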
 
@@ -537,9 +553,9 @@
                       Register receiver,
                       Register holder,
                       Register scratch,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
-    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     ExternalReference ref = ExternalReference(
         IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
@@ -715,7 +731,7 @@
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
-                       JSObject* holder_obj,
+                       JSObject* interceptor_holder,
                        LookupResult* lookup,
                        String* name,
                        const CallOptimization& optimization,
@@ -728,10 +744,13 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
        !lookup->holder()->IsGlobalObject()) {
-     depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+     depth1 =
+         optimization.GetPrototypeDepthOfExpectedType(object,
+                                                      interceptor_holder);
      if (depth1 == kInvalidProtoDepth) {
-       depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
-                                                             lookup->holder());
+       depth2 =
+           optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                        lookup->holder());
      }
      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
                             (depth2 != kInvalidProtoDepth);
@@ -746,23 +765,31 @@
       ReserveSpaceForFastApiCall(masm, scratch1);
     }
 
+    // Check that the maps from the receiver to the interceptor's holder
+    // haven't changed, so we can invoke the interceptor.
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj, scratch1,
-                                        scratch2, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, name,
+                                        depth1, miss);
 
+    // Invoke the interceptor and, if it provides a value,
+    // branch to |regular_invoke|.
     Label regular_invoke;
-    LoadWithInterceptor(masm, receiver, holder, holder_obj, scratch2,
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
                         &regular_invoke);
 
-    // Generate code for the failed interceptor case.
+    // The interceptor returned nothing for this property.  Try to use the
+    // cached constant function.
 
-    // Check the lookup is still valid.
-    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+    // Check that the maps from the interceptor's holder to the constant
+    // function's holder haven't changed, so we can use the cached constant
+    // function.
+    stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                     lookup->holder(), scratch1,
                                     scratch2, name, depth2, miss);
 
+    // Invoke function.
     if (can_do_fast_api_call) {
       GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
@@ -770,12 +797,14 @@
                         JUMP_FUNCTION);
     }
 
+    // Deferred code for the fast API call case: clean up preallocated space.
     if (can_do_fast_api_call) {
       __ bind(&miss_cleanup);
       FreeSpaceForFastApiCall(masm);
       __ b(miss_label);
     }
 
+    // Invoke a regular function.
     __ bind(&regular_invoke);
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
@@ -788,10 +817,10 @@
                       Register scratch1,
                       Register scratch2,
                       String* name,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         miss_label);
 
@@ -804,7 +833,7 @@
                              receiver,
                              holder,
                              name_,
-                             holder_obj);
+                             interceptor_holder);
 
     __ CallExternalReference(
           ExternalReference(
@@ -1618,15 +1647,11 @@
                                                  JSObject* object,
                                                  JSObject* last) {
   // ----------- S t a t e -------------
-  //  -- r2    : name
+  //  -- r0    : receiver
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  // Load receiver.
-  __ ldr(r0, MemOperand(sp, 0));
-
   // Check that receiver is not a smi.
   __ tst(r0, Operand(kSmiTagMask));
   __ b(eq, &miss);
@@ -1663,14 +1688,12 @@
                                            int index,
                                            String* name) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
-
   GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1685,13 +1708,12 @@
                                               JSObject* holder,
                                               AccessorInfo* callback) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
                                       callback, name, &miss, &failure);
@@ -1710,14 +1732,12 @@
                                               Object* value,
                                               String* name) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp] : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
-
   GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1731,14 +1751,12 @@
                                                  JSObject* holder,
                                                  String* name) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
-
   LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
   GenerateLoadInterceptor(object,
@@ -1764,10 +1782,9 @@
                                             String* name,
                                             bool is_dont_delete) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- r0    : receiver
-  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;
 
@@ -1974,32 +1991,31 @@
                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
-  //  -- r2    : name
+  //  -- r1    : key
+  //  -- r2    : receiver
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  __ IncrementCounter(&Counters::keyed_store_field, 1, r1, r3);
+  __ IncrementCounter(&Counters::keyed_store_field, 1, r3, r4);
 
   // Check that the name has not changed.
-  __ cmp(r2, Operand(Handle<String>(name)));
+  __ cmp(r1, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  // Load receiver from the stack.
-  __ ldr(r3, MemOperand(sp));
-  // r1 is used as scratch register, r3 and r2 might be clobbered.
+  // r3 is used as a scratch register.  r1 and r2 keep their values if a jump
+  // to the miss label is generated.
   GenerateStoreField(masm(),
                      object,
                      index,
                      transition,
-                     r3, r2, r1,
+                     r2, r1, r3,
                      &miss);
   __ bind(&miss);
 
-  __ DecrementCounter(&Counters::keyed_store_field, 1, r1, r3);
-  __ mov(r2, Operand(Handle<String>(name)));  // restore name register.
+  __ DecrementCounter(&Counters::keyed_store_field, 1, r3, r4);
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
@@ -2153,3 +2169,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index f7b337d..3acd2df 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
@@ -307,7 +309,8 @@
 
 void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  SpillAllButCopyTOSToR0();
+  PopToR0();
+  SpillAll();
   __ mov(r2, Operand(name));
   CallCodeObject(ic, mode, 0);
 }
@@ -337,8 +340,10 @@
 
 
 void VirtualFrame::CallKeyedStoreIC() {
-  ASSERT(SpilledScope::is_spilled());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  PopToR1R0();
+  SpillAll();
+  EmitPop(r2);
   CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
 }
 
@@ -505,36 +510,40 @@
 
 
 void VirtualFrame::Dup() {
-  AssertIsNotSpilled();
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-      __ ldr(r0, MemOperand(sp, 0));
-      top_of_stack_state_ = R0_TOS;
-      break;
-    case R0_TOS:
-      __ mov(r1, r0);
-      // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
-      top_of_stack_state_ = R0_R1_TOS;
-      break;
-    case R1_TOS:
-      __ mov(r0, r1);
-      // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
-      top_of_stack_state_ = R0_R1_TOS;
-      break;
-    case R0_R1_TOS:
-      __ push(r1);
-      __ mov(r1, r0);
-      // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
-      top_of_stack_state_ = R0_R1_TOS;
-      break;
-    case R1_R0_TOS:
-      __ push(r0);
-      __ mov(r0, r1);
-      // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
-      top_of_stack_state_ = R0_R1_TOS;
-      break;
-    default:
-      UNREACHABLE();
+  if (SpilledScope::is_spilled()) {
+    __ ldr(ip, MemOperand(sp, 0));
+    __ push(ip);
+  } else {
+    switch (top_of_stack_state_) {
+      case NO_TOS_REGISTERS:
+        __ ldr(r0, MemOperand(sp, 0));
+        top_of_stack_state_ = R0_TOS;
+        break;
+      case R0_TOS:
+        __ mov(r1, r0);
+        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+        top_of_stack_state_ = R0_R1_TOS;
+        break;
+      case R1_TOS:
+        __ mov(r0, r1);
+        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+        top_of_stack_state_ = R0_R1_TOS;
+        break;
+      case R0_R1_TOS:
+        __ push(r1);
+        __ mov(r1, r0);
+        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+        top_of_stack_state_ = R0_R1_TOS;
+        break;
+      case R1_R0_TOS:
+        __ push(r0);
+        __ mov(r0, r1);
+        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+        top_of_stack_state_ = R0_R1_TOS;
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
   element_count_++;
 }
@@ -749,3 +758,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
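
Dup() above is one transition of the virtual frame's top-of-stack register cache: up to two stack elements live in r0/r1, and duplication normalizes every register state to R0_R1_TOS with r0 on top. A model of the transition function (enumerator names mirror the ARM virtual frame):

    enum TopOfStackState {
      NO_TOS_REGISTERS,  // All elements are on the real stack.
      R0_TOS,            // r0 caches the top element.
      R1_TOS,            // r1 caches the top element.
      R0_R1_TOS,         // r0 on top, r1 below it.
      R1_R0_TOS          // r1 on top, r0 below it.
    };

    TopOfStackState StateAfterDup(TopOfStackState state) {
      // Duplicating leaves two equal elements on top; every cached state
      // prefers r0 as the new TOS, and the uncached state loads into r0.
      return state == NO_TOS_REGISTERS ? R0_TOS : R0_R1_TOS;
    }
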
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 655194d..9471d61 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -278,7 +278,8 @@
                      InvokeJSFlags flag,
                      int arg_count);
 
-  // Call load IC. Receiver is on the stack. Result is returned in r0.
+  // Call load IC. Receiver is on the stack and is consumed. Result is returned
+  // in r0.
   void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
 
   // Call store IC. If the load is contextual, value is found on top of the
@@ -290,8 +291,8 @@
   // Result is returned in r0.
   void CallKeyedLoadIC();
 
-  // Call keyed store IC. Key and receiver are on the stack and the value is in
-  // r0. Result is returned in r0.
+  // Call keyed store IC. Value, key and receiver are on the stack. All three
+  // are consumed. Result is returned in r0.
   void CallKeyedStoreIC();
 
   // Call into an IC stub given the number of arguments it removes