Added support for strict mode parameter and object property validation.

Fixed a couple of crash bugs.


git-svn-id: http://v8.googlecode.com/svn/trunk@6521 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 35ea957..7e2a130 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2011-01-28: Version 3.0.12
+
+        Added support for strict mode parameter and object property
+        validation.
+
+        Fixed a couple of crash bugs.
+
+
 2011-01-25: Version 3.0.11
 
         Fixed a bug in deletion of lookup slots that could cause global
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 155aef8..0f52ac6 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -213,74 +213,29 @@
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Instruction encoding bits.
-enum {
-  H   = 1 << 5,   // halfword (or byte)
-  S6  = 1 << 6,   // signed (or unsigned)
-  L   = 1 << 20,  // load (or store)
-  S   = 1 << 20,  // set condition code (or leave unchanged)
-  W   = 1 << 21,  // writeback base register (or leave unchanged)
-  A   = 1 << 21,  // accumulate in multiply instruction (or not)
-  B   = 1 << 22,  // unsigned byte (or word)
-  N   = 1 << 22,  // long (or short)
-  U   = 1 << 23,  // positive (or negative) offset/index
-  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
-  I   = 1 << 25,  // immediate shifter operand (or not)
-
-  B4  = 1 << 4,
-  B5  = 1 << 5,
-  B6  = 1 << 6,
-  B7  = 1 << 7,
-  B8  = 1 << 8,
-  B9  = 1 << 9,
-  B12 = 1 << 12,
-  B16 = 1 << 16,
-  B18 = 1 << 18,
-  B19 = 1 << 19,
-  B20 = 1 << 20,
-  B21 = 1 << 21,
-  B22 = 1 << 22,
-  B23 = 1 << 23,
-  B24 = 1 << 24,
-  B25 = 1 << 25,
-  B26 = 1 << 26,
-  B27 = 1 << 27,
-
-  // Instruction bit masks.
-  RdMask     = 15 << 12,  // in str instruction
-  CondMask   = 15 << 28,
-  CoprocessorMask = 15 << 8,
-  OpCodeMask = 15 << 21,  // in data-processing instructions
-  Imm24Mask  = (1 << 24) - 1,
-  Off12Mask  = (1 << 12) - 1,
-  // Reserved condition.
-  nv = 15 << 28
-};
-
+// Specific instructions, constants, and masks.
 
 // add(sp, sp, 4) instruction (aka Pop())
-static const Instr kPopInstruction =
-    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+const Instr kPopInstruction =
+    al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
 // register r is not encoded.
-static const Instr kPushRegPattern =
+const Instr kPushRegPattern =
     al | B26 | 4 | NegPreIndex | sp.code() * B16;
 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
 // register r is not encoded.
-static const Instr kPopRegPattern =
+const Instr kPopRegPattern =
     al | B26 | L | 4 | PostIndex | sp.code() * B16;
 // mov lr, pc
-const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
 // ldr rd, [pc, #offset]
-const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
 // blxcc rm
 const Instr kBlxRegMask =
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
-    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
 const Instr kMovMvnPattern = 0xd * B21;
 const Instr kMovMvnFlip = B22;
@@ -292,33 +247,28 @@
 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
 const Instr kCmpCmnPattern = 0x15 * B20;
 const Instr kCmpCmnFlip = B21;
-const Instr kALUMask = 0x6f * B21;
-const Instr kAddPattern = 0x4 * B21;
-const Instr kSubPattern = 0x2 * B21;
-const Instr kBicPattern = 0xe * B21;
-const Instr kAndPattern = 0x0 * B21;
 const Instr kAddSubFlip = 0x6 * B21;
 const Instr kAndBicFlip = 0xe * B21;
 
 // A mask for the Rd register for push, pop, ldr, str instructions.
-const Instr kRdMask = 0x0000f000;
-static const int kRdShift = 12;
-static const Instr kLdrRegFpOffsetPattern =
+const Instr kLdrRegFpOffsetPattern =
     al | B26 | L | Offset | fp.code() * B16;
-static const Instr kStrRegFpOffsetPattern =
+const Instr kStrRegFpOffsetPattern =
     al | B26 | Offset | fp.code() * B16;
-static const Instr kLdrRegFpNegOffsetPattern =
+const Instr kLdrRegFpNegOffsetPattern =
     al | B26 | L | NegOffset | fp.code() * B16;
-static const Instr kStrRegFpNegOffsetPattern =
+const Instr kStrRegFpNegOffsetPattern =
     al | B26 | NegOffset | fp.code() * B16;
-static const Instr kLdrStrInstrTypeMask = 0xffff0000;
-static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
-static const Instr kLdrStrOffsetMask = 0x00000fff;
+const Instr kLdrStrInstrTypeMask = 0xffff0000;
+const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
+const Instr kLdrStrOffsetMask = 0x00000fff;
+
 
 // Spare buffer.
 static const int kMinimalBufferSize = 4*KB;
 static byte* spare_buffer_ = NULL;
 
+
 Assembler::Assembler(void* buffer, int buffer_size)
     : positions_recorder_(this),
       allow_peephole_optimization_(false) {
@@ -411,7 +361,7 @@
   ASSERT(IsBranch(instr));
   // Take the jump offset in the lower 24 bits, sign extend it and multiply it
   // with 4 to get the offset in bytes.
-  return ((instr & Imm24Mask) << 8) >> 6;
+  return ((instr & kImm24Mask) << 8) >> 6;
 }
 
 
@@ -423,7 +373,7 @@
 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
   ASSERT(IsLdrRegisterImmediate(instr));
   bool positive = (instr & B23) == B23;
-  int offset = instr & Off12Mask;  // Zero extended offset.
+  int offset = instr & kOff12Mask;  // Zero extended offset.
   return positive ? offset : -offset;
 }
 
@@ -436,7 +386,7 @@
   // Set bit indicating whether the offset should be added.
   instr = (instr & ~B23) | (positive ? B23 : 0);
   // Set the actual offset.
-  return (instr & ~Off12Mask) | offset;
+  return (instr & ~kOff12Mask) | offset;
 }
 
 
@@ -453,7 +403,7 @@
   // Set bit indicating whether the offset should be added.
   instr = (instr & ~B23) | (positive ? B23 : 0);
   // Set the actual offset.
-  return (instr & ~Off12Mask) | offset;
+  return (instr & ~kOff12Mask) | offset;
 }
 
 
@@ -467,13 +417,13 @@
   ASSERT(offset >= 0);
   ASSERT(is_uint12(offset));
   // Set the offset.
-  return (instr & ~Off12Mask) | offset;
+  return (instr & ~kOff12Mask) | offset;
 }
 
 
 Register Assembler::GetRd(Instr instr) {
   Register reg;
-  reg.code_ = ((instr & kRdMask) >> kRdShift);
+  reg.code_ = Instruction::RdValue(instr);
   return reg;
 }
 
@@ -511,7 +461,7 @@
 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // ldr<cond> <Rd>, [pc +/- offset_12].
-  return (instr & 0x0f7f0000) == 0x051f0000;
+  return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
 }
 
 
@@ -532,13 +482,14 @@
 
 int Assembler::target_at(int pos)  {
   Instr instr = instr_at(pos);
-  if ((instr & ~Imm24Mask) == 0) {
+  if ((instr & ~kImm24Mask) == 0) {
     // Emitted label constant, not part of a branch.
     return instr - (Code::kHeaderSize - kHeapObjectTag);
   }
   ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
-  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
-  if ((instr & CondMask) == nv && (instr & B24) != 0) {
+  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
+  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
+      ((instr & B24) != 0)) {
     // blx uses bit 24 to encode bit 2 of imm26
     imm26 += 2;
   }
@@ -548,7 +499,7 @@
 
 void Assembler::target_at_put(int pos, int target_pos) {
   Instr instr = instr_at(pos);
-  if ((instr & ~Imm24Mask) == 0) {
+  if ((instr & ~kImm24Mask) == 0) {
     ASSERT(target_pos == kEndOfChain || target_pos >= 0);
     // Emitted label constant, not part of a branch.
     // Make label relative to Code* of generated Code object.
@@ -557,17 +508,17 @@
   }
   int imm26 = target_pos - (pos + kPcLoadDelta);
   ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
-  if ((instr & CondMask) == nv) {
+  if (Instruction::ConditionField(instr) == kSpecialCondition) {
     // blx uses bit 24 to encode bit 2 of imm26
     ASSERT((imm26 & 1) == 0);
-    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
   } else {
     ASSERT((imm26 & 3) == 0);
-    instr &= ~Imm24Mask;
+    instr &= ~kImm24Mask;
   }
   int imm24 = imm26 >> 2;
   ASSERT(is_int24(imm24));
-  instr_at_put(pos, instr | (imm24 & Imm24Mask));
+  instr_at_put(pos, instr | (imm24 & kImm24Mask));
 }
 
 
@@ -582,14 +533,14 @@
     while (l.is_linked()) {
       PrintF("@ %d ", l.pos());
       Instr instr = instr_at(l.pos());
-      if ((instr & ~Imm24Mask) == 0) {
+      if ((instr & ~kImm24Mask) == 0) {
         PrintF("value\n");
       } else {
         ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
-        int cond = instr & CondMask;
+        Condition cond = Instruction::ConditionField(instr);
         const char* b;
         const char* c;
-        if (cond == nv) {
+        if (cond == kSpecialCondition) {
           b = "blx";
           c = "";
         } else {
@@ -731,14 +682,14 @@
       }
     } else {
       Instr alu_insn = (*instr & kALUMask);
-      if (alu_insn == kAddPattern ||
-          alu_insn == kSubPattern) {
+      if (alu_insn == ADD ||
+          alu_insn == SUB) {
         if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
           *instr ^= kAddSubFlip;
           return true;
         }
-      } else if (alu_insn == kAndPattern ||
-                 alu_insn == kBicPattern) {
+      } else if (alu_insn == AND ||
+                 alu_insn == BIC) {
         if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
           *instr ^= kAndBicFlip;
           return true;
@@ -782,7 +733,7 @@
                          Register rd,
                          const Operand& x) {
   CheckBuffer();
-  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
   if (!x.rm_.is_valid()) {
     // Immediate.
     uint32_t rotate_imm;
@@ -794,8 +745,8 @@
       // However, if the original instruction is a 'mov rd, x' (not setting the
       // condition code), then replace it with a 'ldr rd, [pc]'.
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
-      Condition cond = static_cast<Condition>(instr & CondMask);
-      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
+      Condition cond = Instruction::ConditionField(instr);
+      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
         if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
           RecordRelocInfo(x.rmode_, x.imm32_);
           ldr(rd, MemOperand(pc, 0), cond);
@@ -836,7 +787,7 @@
 
 
 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(CondMask | B | L)) == B26);
+  ASSERT((instr & ~(kCondMask | B | L)) == B26);
   int am = x.am_;
   if (!x.rm_.is_valid()) {
     // Immediate offset.
@@ -849,8 +800,7 @@
       // Immediate offset cannot be encoded, load it first to register ip
       // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-      mov(ip, Operand(x.offset_), LeaveCC,
-          static_cast<Condition>(instr & CondMask));
+      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
       addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
       return;
     }
@@ -869,7 +819,7 @@
 
 
 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
   ASSERT(x.rn_.is_valid());
   int am = x.am_;
   if (!x.rm_.is_valid()) {
@@ -883,8 +833,7 @@
       // Immediate offset cannot be encoded, load it first to register ip
       // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-      mov(ip, Operand(x.offset_), LeaveCC,
-          static_cast<Condition>(instr & CondMask));
+      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
       addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
       return;
     }
@@ -895,7 +844,7 @@
     // rn (and rd in a load) should never be ip, or will be trashed.
     ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
     mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
-        static_cast<Condition>(instr & CondMask));
+        Instruction::ConditionField(instr));
     addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
     return;
   } else {
@@ -909,7 +858,7 @@
 
 
 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
-  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
   ASSERT(rl != 0);
   ASSERT(!rn.is(pc));
   emit(instr | rn.code()*B16 | rl);
@@ -919,7 +868,7 @@
 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
   // Unindexed addressing is not encoded by this function.
   ASSERT_EQ((B27 | B26),
-            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
+            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
   ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
   int am = x.am_;
   int offset_8 = x.offset_;
@@ -982,7 +931,7 @@
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
   ASSERT(is_int24(imm24));
-  emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+  emit(cond | B27 | B25 | (imm24 & kImm24Mask));
 
   if (cond == al) {
     // Dead code is a good location to emit the constant pool.
@@ -996,7 +945,7 @@
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
   ASSERT(is_int24(imm24));
-  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
 }
 
 
@@ -1006,21 +955,21 @@
   int h = ((branch_offset & 2) >> 1)*B24;
   int imm24 = branch_offset >> 2;
   ASSERT(is_int24(imm24));
-  emit(nv | B27 | B25 | h | (imm24 & Imm24Mask));
+  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
 }
 
 
 void Assembler::blx(Register target, Condition cond) {  // v5 and above
   positions_recorder()->WriteRecordedPositions();
   ASSERT(!target.is(pc));
-  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
 }
 
 
 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
   positions_recorder()->WriteRecordedPositions();
   ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
-  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
 }
 
 
@@ -1028,31 +977,31 @@
 
 void Assembler::and_(Register dst, Register src1, const Operand& src2,
                      SBit s, Condition cond) {
-  addrmod1(cond | 0*B21 | s, src1, dst, src2);
+  addrmod1(cond | AND | s, src1, dst, src2);
 }
 
 
 void Assembler::eor(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 1*B21 | s, src1, dst, src2);
+  addrmod1(cond | EOR | s, src1, dst, src2);
 }
 
 
 void Assembler::sub(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 2*B21 | s, src1, dst, src2);
+  addrmod1(cond | SUB | s, src1, dst, src2);
 }
 
 
 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 3*B21 | s, src1, dst, src2);
+  addrmod1(cond | RSB | s, src1, dst, src2);
 }
 
 
 void Assembler::add(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 4*B21 | s, src1, dst, src2);
+  addrmod1(cond | ADD | s, src1, dst, src2);
 
   // Eliminate pattern: push(r), pop()
   //   str(src, MemOperand(sp, 4, NegPreIndex), al);
@@ -1061,7 +1010,7 @@
   if (can_peephole_optimize(2) &&
       // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
+      (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
     pc_ -= 2 * kInstrSize;
     if (FLAG_print_peephole_optimization) {
       PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
@@ -1072,45 +1021,45 @@
 
 void Assembler::adc(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 5*B21 | s, src1, dst, src2);
+  addrmod1(cond | ADC | s, src1, dst, src2);
 }
 
 
 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 6*B21 | s, src1, dst, src2);
+  addrmod1(cond | SBC | s, src1, dst, src2);
 }
 
 
 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 7*B21 | s, src1, dst, src2);
+  addrmod1(cond | RSC | s, src1, dst, src2);
 }
 
 
 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 8*B21 | S, src1, r0, src2);
+  addrmod1(cond | TST | S, src1, r0, src2);
 }
 
 
 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 9*B21 | S, src1, r0, src2);
+  addrmod1(cond | TEQ | S, src1, r0, src2);
 }
 
 
 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 10*B21 | S, src1, r0, src2);
+  addrmod1(cond | CMP | S, src1, r0, src2);
 }
 
 
 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 11*B21 | S, src1, r0, src2);
+  addrmod1(cond | CMN | S, src1, r0, src2);
 }
 
 
 void Assembler::orr(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 12*B21 | s, src1, dst, src2);
+  addrmod1(cond | ORR | s, src1, dst, src2);
 }
 
 
@@ -1122,7 +1071,7 @@
   // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
   // or MarkCode(int/NopMarkerTypes) pseudo instructions.
   ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
-  addrmod1(cond | 13*B21 | s, r0, dst, src);
+  addrmod1(cond | MOV | s, r0, dst, src);
 }
 
 
@@ -1139,12 +1088,12 @@
 
 void Assembler::bic(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 14*B21 | s, src1, dst, src2);
+  addrmod1(cond | BIC | s, src1, dst, src2);
 }
 
 
 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
-  addrmod1(cond | 15*B21 | s, r0, dst, src);
+  addrmod1(cond | MVN | s, r0, dst, src);
 }
 
 
@@ -1222,7 +1171,7 @@
   // v5 and above.
   ASSERT(!dst.is(pc) && !src.is(pc));
   emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
-       15*B8 | B4 | src.code());
+       15*B8 | CLZ | src.code());
 }
 
 
@@ -1376,7 +1325,7 @@
     Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
 
     if (IsPush(push_instr) && IsPop(pop_instr)) {
-      if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
+      if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
         // For consecutive push and pop on different registers,
         // we delete both the push & pop and insert a register move.
         // push ry, pop rx --> mov rx, ry
@@ -1457,8 +1406,8 @@
         IsPop(mem_read_instr)) {
       if ((IsLdrRegFpOffset(ldr_instr) ||
         IsLdrRegFpNegOffset(ldr_instr))) {
-        if ((mem_write_instr & kRdMask) ==
-              (mem_read_instr & kRdMask)) {
+        if (Instruction::RdValue(mem_write_instr) ==
+                                  Instruction::RdValue(mem_read_instr)) {
           // Pattern: push & pop from/to same register,
           // with a fp+offset ldr in between
           //
@@ -1473,7 +1422,8 @@
           // else
           //   ldr rz, [fp, #-24]
 
-          if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
+          if (Instruction::RdValue(mem_write_instr) ==
+              Instruction::RdValue(ldr_instr)) {
             pc_ -= 3 * kInstrSize;
           } else {
             pc_ -= 3 * kInstrSize;
@@ -1503,22 +1453,23 @@
           //   ldr rz, [fp, #-24]
 
           Register reg_pushed, reg_popped;
-          if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
+          if (Instruction::RdValue(mem_read_instr) ==
+              Instruction::RdValue(ldr_instr)) {
             reg_pushed = GetRd(mem_write_instr);
             reg_popped = GetRd(mem_read_instr);
             pc_ -= 3 * kInstrSize;
             mov(reg_popped, reg_pushed);
-          } else if ((mem_write_instr & kRdMask)
-                                != (ldr_instr & kRdMask)) {
+          } else if (Instruction::RdValue(mem_write_instr) !=
+                     Instruction::RdValue(ldr_instr)) {
             reg_pushed = GetRd(mem_write_instr);
             reg_popped = GetRd(mem_read_instr);
             pc_ -= 3 * kInstrSize;
             emit(ldr_instr);
             mov(reg_popped, reg_pushed);
-          } else if (((mem_read_instr & kRdMask)
-                                     != (ldr_instr & kRdMask)) ||
-                    ((mem_write_instr & kRdMask)
-                                     == (ldr_instr & kRdMask)) ) {
+          } else if ((Instruction::RdValue(mem_read_instr) !=
+                      Instruction::RdValue(ldr_instr)) ||
+                     (Instruction::RdValue(mem_write_instr) ==
+                      Instruction::RdValue(ldr_instr))) {
             reg_pushed = GetRd(mem_write_instr);
             reg_popped = GetRd(mem_read_instr);
             pc_ -= 3 * kInstrSize;
@@ -1640,18 +1591,14 @@
 // enabling/disabling and a counter feature. See simulator-arm.h .
 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
 #ifndef __arm__
-  // See constants-arm.h SoftwareInterruptCodes. Unluckily the Assembler and
-  // Simulator do not share constants declaration.
   ASSERT(code >= kDefaultStopCode);
-  static const uint32_t kStopInterruptCode = 1 << 23;
-  static const uint32_t kMaxStopCode = kStopInterruptCode - 1;
   // The Simulator will handle the stop instruction and get the message address.
   // It expects to find the address just after the svc instruction.
   BlockConstPoolFor(2);
   if (code >= 0) {
-    svc(kStopInterruptCode + code, cond);
+    svc(kStopCode + code, cond);
   } else {
-    svc(kStopInterruptCode + kMaxStopCode, cond);
+    svc(kStopCode + kMaxStopCode, cond);
   }
   emit(reinterpret_cast<Instr>(msg));
 #else  // def __arm__
@@ -1673,7 +1620,7 @@
 
 void Assembler::bkpt(uint32_t imm16) {  // v5 and above
   ASSERT(is_uint16(imm16));
-  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
 }
 
 
@@ -1703,7 +1650,7 @@
                      CRegister crn,
                      CRegister crm,
                      int opcode_2) {  // v5 and above
-  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
 }
 
 
@@ -1726,7 +1673,7 @@
                      CRegister crn,
                      CRegister crm,
                      int opcode_2) {  // v5 and above
-  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
 }
 
 
@@ -1749,7 +1696,7 @@
                      CRegister crn,
                      CRegister crm,
                      int opcode_2) {  // v5 and above
-  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
 }
 
 
@@ -1779,7 +1726,7 @@
                      CRegister crd,
                      const MemOperand& src,
                      LFlag l) {  // v5 and above
-  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+  ldc(coproc, crd, src, l, kSpecialCondition);
 }
 
 
@@ -1788,7 +1735,7 @@
                      Register rn,
                      int option,
                      LFlag l) {  // v5 and above
-  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+  ldc(coproc, crd, rn, option, l, kSpecialCondition);
 }
 
 
@@ -1818,7 +1765,7 @@
                      coproc, CRegister crd,
                      const MemOperand& dst,
                      LFlag l) {  // v5 and above
-  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+  stc(coproc, crd, dst, l, kSpecialCondition);
 }
 
 
@@ -1827,7 +1774,7 @@
                      Register rn,
                      int option,
                      LFlag l) {  // v5 and above
-  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+  stc(coproc, crd, rn, option, l, kSpecialCondition);
 }
 
 
@@ -2637,7 +2584,7 @@
 
     // Instruction to patch must be a ldr/str [pc, #offset].
     // P and U set, B and W clear, Rn == pc, offset12 still 0.
-    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
            (2*B25 | P | U | pc.code()*B16));
     int delta = pc_ - rinfo.pc() - 8;
     ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index ad1bdab..b3343f0 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -41,6 +41,7 @@
 #define V8_ARM_ASSEMBLER_ARM_H_
 #include <stdio.h>
 #include "assembler.h"
+#include "constants-arm.h"
 #include "serialize.h"
 
 namespace v8 {
@@ -300,18 +301,6 @@
 const DwVfpRegister d14 = { 14 };
 const DwVfpRegister d15 = { 15 };
 
-// VFP FPSCR constants.
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
-
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
-
-static const uint32_t kVFPExceptionMask = 0xf;
 
 // Coprocessor register
 struct CRegister {
@@ -372,149 +361,6 @@
 };
 
 
-// Condition field in instructions.
-enum Condition {
-  // any value < 0 is considered no_condition
-  no_condition  = -1,
-
-  eq =  0 << 28,  // Z set            equal.
-  ne =  1 << 28,  // Z clear          not equal.
-  nz =  1 << 28,  // Z clear          not zero.
-  cs =  2 << 28,  // C set            carry set.
-  hs =  2 << 28,  // C set            unsigned higher or same.
-  cc =  3 << 28,  // C clear          carry clear.
-  lo =  3 << 28,  // C clear          unsigned lower.
-  mi =  4 << 28,  // N set            negative.
-  pl =  5 << 28,  // N clear          positive or zero.
-  vs =  6 << 28,  // V set            overflow.
-  vc =  7 << 28,  // V clear          no overflow.
-  hi =  8 << 28,  // C set, Z clear   unsigned higher.
-  ls =  9 << 28,  // C clear or Z set unsigned lower or same.
-  ge = 10 << 28,  // N == V           greater or equal.
-  lt = 11 << 28,  // N != V           less than.
-  gt = 12 << 28,  // Z clear, N == V  greater than.
-  le = 13 << 28,  // Z set or N != V  less then or equal
-  al = 14 << 28   //                  always.
-};
-
-
-// Returns the equivalent of !cc.
-inline Condition NegateCondition(Condition cc) {
-  ASSERT(cc != al);
-  return static_cast<Condition>(cc ^ ne);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
-  switch (cc) {
-    case lo:
-      return hi;
-    case hi:
-      return lo;
-    case hs:
-      return ls;
-    case ls:
-      return hs;
-    case lt:
-      return gt;
-    case gt:
-      return lt;
-    case ge:
-      return le;
-    case le:
-      return ge;
-    default:
-      return cc;
-  };
-}
-
-
-// Branch hints are not used on the ARM.  They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the arm.  Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants
-
-// Shifter operand shift operation
-enum ShiftOp {
-  LSL = 0 << 5,
-  LSR = 1 << 5,
-  ASR = 2 << 5,
-  ROR = 3 << 5,
-  RRX = -1
-};
-
-
-// Condition code updating mode
-enum SBit {
-  SetCC   = 1 << 20,  // set condition code
-  LeaveCC = 0 << 20   // leave condition code unchanged
-};
-
-
-// Status register selection
-enum SRegister {
-  CPSR = 0 << 22,
-  SPSR = 1 << 22
-};
-
-
-// Status register fields
-enum SRegisterField {
-  CPSR_c = CPSR | 1 << 16,
-  CPSR_x = CPSR | 1 << 17,
-  CPSR_s = CPSR | 1 << 18,
-  CPSR_f = CPSR | 1 << 19,
-  SPSR_c = SPSR | 1 << 16,
-  SPSR_x = SPSR | 1 << 17,
-  SPSR_s = SPSR | 1 << 18,
-  SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values)
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode
-enum AddrMode {
-  // bit encoding P U W
-  Offset       = (8|4|0) << 21,  // offset (without writeback to base)
-  PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
-  PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
-  NegOffset    = (8|0|0) << 21,  // negative offset (without writeback to base)
-  NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
-  NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
-};
-
-
-// Load/store multiple addressing mode
-enum BlockAddrMode {
-  // bit encoding P U W
-  da           = (0|0|0) << 21,  // decrement after
-  ia           = (0|4|0) << 21,  // increment after
-  db           = (8|0|0) << 21,  // decrement before
-  ib           = (8|4|0) << 21,  // increment before
-  da_w         = (0|0|1) << 21,  // decrement after with writeback to base
-  ia_w         = (0|4|1) << 21,  // increment after with writeback to base
-  db_w         = (8|0|1) << 21,  // decrement before with writeback to base
-  ib_w         = (8|4|1) << 21   // increment before with writeback to base
-};
-
-
-// Coprocessor load/store operand size
-enum LFlag {
-  Long  = 1 << 22,  // long load/store coprocessor
-  Short = 0 << 22   // short load/store coprocessor
-};
-
-
 // -----------------------------------------------------------------------------
 // Machine instruction Operands
 
@@ -658,9 +504,6 @@
 };
 
 
-typedef int32_t Instr;
-
-
 extern const Instr kMovLrPc;
 extern const Instr kLdrPCMask;
 extern const Instr kLdrPCPattern;
@@ -680,15 +523,11 @@
 extern const Instr kCmpCmnMask;
 extern const Instr kCmpCmnPattern;
 extern const Instr kCmpCmnFlip;
-
-extern const Instr kALUMask;
-extern const Instr kAddPattern;
-extern const Instr kSubPattern;
-extern const Instr kAndPattern;
-extern const Instr kBicPattern;
 extern const Instr kAddSubFlip;
 extern const Instr kAndBicFlip;
 
+
+
 class Assembler : public Malloced {
  public:
   // Create an assembler. Instructions and relocation information are emitted
@@ -1001,7 +840,6 @@
   void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
 
   // Exception-generating instructions and debugging support
-  static const int kDefaultStopCode = -1;
   void stop(const char* msg,
             Condition cond = al,
             int32_t code = kDefaultStopCode);
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 0210b1b..dbb8242 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -190,7 +190,7 @@
 
   // Check whether an empty sized array is requested.
   __ tst(array_size, array_size);
-  __ b(nz, &not_empty);
+  __ b(ne, &not_empty);
 
   // If an empty array is requested allocate a small elements array anyway. This
   // keeps the code below free of special casing for the empty array.
@@ -566,7 +566,7 @@
   // if it's a string already before calling the conversion builtin.
   Label convert_argument;
   __ bind(&not_cached);
-  __ BranchOnSmi(r0, &convert_argument);
+  __ JumpIfSmi(r0, &convert_argument);
 
   // Is it a String?
   __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -666,7 +666,7 @@
     __ mov(r2, Operand(debug_step_in_fp));
     __ ldr(r2, MemOperand(r2));
     __ tst(r2, r2);
-    __ b(nz, &rt_call);
+    __ b(ne, &rt_call);
 #endif
 
     // Load the initial map and verify that it is in fact a map.
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index f0fd09a..4fa927f 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -41,7 +41,7 @@
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
-                                          Condition cc,
+                                          Condition cond,
                                           bool never_nan_nan);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Register lhs,
@@ -49,7 +49,7 @@
                                     Label* lhs_not_nan,
                                     Label* slow,
                                     bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                            Register lhs,
                                            Register rhs);
@@ -344,6 +344,155 @@
 }
 
 
+class FloatingPointHelper : public AllStatic {
+ public:
+
+  enum Destination {
+    kVFPRegisters,
+    kCoreRegisters
+  };
+
+
+  // Loads smis from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
+  // floating point registers VFP3 must be supported. If core registers are
+  // requested when VFP3 is supported d6 and d7 will be scratched.
+  static void LoadSmis(MacroAssembler* masm,
+                       Destination destination,
+                       Register scratch1,
+                       Register scratch2);
+
+  // Loads objects from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
+  // floating point registers VFP3 must be supported. If core registers are
+  // requested when VFP3 is supported d6 and d7 will still be scratched. If
+  // either r0 or r1 is not a number (not smi and not heap number object) the
+  // not_number label is jumped to.
+  static void LoadOperands(MacroAssembler* masm,
+                           FloatingPointHelper::Destination destination,
+                           Register heap_number_map,
+                           Register scratch1,
+                           Register scratch2,
+                           Label* not_number);
+ private:
+  static void LoadNumber(MacroAssembler* masm,
+                         FloatingPointHelper::Destination destination,
+                         Register object,
+                         DwVfpRegister dst,
+                         Register dst1,
+                         Register dst2,
+                         Register heap_number_map,
+                         Register scratch1,
+                         Register scratch2,
+                         Label* not_number);
+};
+
+
+void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
+                                   FloatingPointHelper::Destination destination,
+                                   Register scratch1,
+                                   Register scratch2) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
+    __ vmov(s15, scratch1);
+    __ vcvt_f64_s32(d7, s15);
+    __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
+    __ vmov(s13, scratch1);
+    __ vcvt_f64_s32(d6, s13);
+    if (destination == kCoreRegisters) {
+      __ vmov(r2, r3, d7);
+      __ vmov(r0, r1, d6);
+    }
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Write Smi from r0 to r3 and r2 in double format.
+    __ mov(scratch1, Operand(r0));
+    ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
+    __ push(lr);
+    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+    // Write Smi from r1 to r1 and r0 in double format.  r9 is scratch.
+    __ mov(scratch1, Operand(r1));
+    ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
+    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(lr);
+  }
+}
+
+
+void FloatingPointHelper::LoadOperands(
+    MacroAssembler* masm,
+    FloatingPointHelper::Destination destination,
+    Register heap_number_map,
+    Register scratch1,
+    Register scratch2,
+    Label* slow) {
+
+  // Load right operand (r0) to d6 or r2/r3.
+  LoadNumber(masm, destination,
+             r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
+
+  // Load left operand (r1) to d7 or r0/r1.
+  LoadNumber(masm, destination,
+             r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
+}
+
+
+void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
+                                       Destination destination,
+                                       Register object,
+                                       DwVfpRegister dst,
+                                       Register dst1,
+                                       Register dst2,
+                                       Register heap_number_map,
+                                       Register scratch1,
+                                       Register scratch2,
+                                       Label* not_number) {
+  Label is_smi, done;
+
+  __ JumpIfSmi(object, &is_smi);
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+  // Handle loading a double from a heap number.
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    // Load the double from tagged HeapNumber to double register.
+    __ sub(scratch1, object, Operand(kHeapObjectTag));
+    __ vldr(dst, scratch1, HeapNumber::kValueOffset);
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Load the double from heap number to dst1 and dst2 in double format.
+    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+  }
+  __ jmp(&done);
+
+  // Handle loading a double from a smi.
+  __ bind(&is_smi);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    // Convert smi to double.
+    __ SmiUntag(scratch1, object);
+    __ vmov(dst.high(), scratch1);
+    __ vcvt_f64_s32(dst, dst.high());
+    if (destination == kCoreRegisters) {
+      __ vmov(dst1, dst2, dst);
+    }
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Write Smi to dst1 and dst2 double format.
+    __ mov(scratch1, Operand(object));
+    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+    __ push(lr);
+    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(lr);
+  }
+
+  __ bind(&done);
+}
+
+
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -395,7 +544,7 @@
 // for "identity and not NaN".
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
-                                          Condition cc,
+                                          Condition cond,
                                           bool never_nan_nan) {
   Label not_identical;
   Label heap_number, return_equal;
@@ -404,31 +553,31 @@
 
   // The two objects are identical.  If we know that one of them isn't NaN then
   // we now know they test equal.
-  if (cc != eq || !never_nan_nan) {
+  if (cond != eq || !never_nan_nan) {
     // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
     // so we do the second best thing - test it ourselves.
     // They are both equal and they are not both Smis so both of them are not
     // Smis.  If it's not a heap number, then return equal.
-    if (cc == lt || cc == gt) {
+    if (cond == lt || cond == gt) {
       __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
       __ b(ge, slow);
     } else {
       __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
       __ b(eq, &heap_number);
       // Comparing JS objects with <=, >= is complicated.
-      if (cc != eq) {
+      if (cond != eq) {
         __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
         __ b(ge, slow);
         // Normally here we fall through to return_equal, but undefined is
         // special: (undefined == undefined) == true, but
         // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
-        if (cc == le || cc == ge) {
+        if (cond == le || cond == ge) {
           __ cmp(r4, Operand(ODDBALL_TYPE));
           __ b(ne, &return_equal);
           __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
           __ cmp(r0, r2);
           __ b(ne, &return_equal);
-          if (cc == le) {
+          if (cond == le) {
             // undefined <= undefined should fail.
             __ mov(r0, Operand(GREATER));
           } else  {
@@ -442,20 +591,20 @@
   }
 
   __ bind(&return_equal);
-  if (cc == lt) {
+  if (cond == lt) {
     __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
-  } else if (cc == gt) {
+  } else if (cond == gt) {
     __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
   } else {
     __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
   }
   __ Ret();
 
-  if (cc != eq || !never_nan_nan) {
+  if (cond != eq || !never_nan_nan) {
     // For less and greater we don't have to check for NaN since the result of
     // x < x is false regardless.  For the others here is some code to check
     // for NaN.
-    if (cc != lt && cc != gt) {
+    if (cond != lt && cond != gt) {
       __ bind(&heap_number);
       // It is a heap number, so return non-equal if it's NaN and equal if it's
       // not NaN.
@@ -479,10 +628,10 @@
       // if all bits in mantissa are zero (it's an Infinity) and non-zero if
       // not (it's a NaN).  For <= and >= we need to load r0 with the failing
       // value if it's a NaN.
-      if (cc != eq) {
+      if (cond != eq) {
         // All-zero means Infinity means equal.
         __ Ret(eq);
-        if (cc == le) {
+        if (cond == le) {
           __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
         } else {
           __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
@@ -589,7 +738,7 @@
 }
 
 
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
+void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
   bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
   Register rhs_exponent = exp_first ? r0 : r1;
   Register lhs_exponent = exp_first ? r2 : r3;
@@ -629,7 +778,7 @@
   __ bind(&one_is_nan);
   // NaN comparisons always fail.
   // Load whatever we need in r0 to make the comparison fail.
-  if (cc == lt || cc == le) {
+  if (cond == lt || cond == le) {
     __ mov(r0, Operand(GREATER));
   } else {
     __ mov(r0, Operand(LESS));
@@ -641,7 +790,8 @@
 
 
 // See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
+                                          Condition cond) {
   bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
   Register rhs_exponent = exp_first ? r0 : r1;
   Register lhs_exponent = exp_first ? r2 : r3;
@@ -649,7 +799,7 @@
   Register lhs_mantissa = exp_first ? r3 : r2;
 
   // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
-  if (cc == eq) {
+  if (cond == eq) {
     // Doubles are not equal unless they have the same bit pattern.
     // Exception: 0 and -0.
     __ cmp(rhs_mantissa, Operand(lhs_mantissa));
@@ -835,7 +985,7 @@
   Label is_smi;
   Label load_result_from_cache;
   if (!object_is_smi) {
-    __ BranchOnSmi(object, &is_smi);
+    __ JumpIfSmi(object, &is_smi);
     if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       __ CheckMap(object,
@@ -861,7 +1011,7 @@
       Register probe = mask;
       __ ldr(probe,
              FieldMemOperand(scratch1, FixedArray::kHeaderSize));
-      __ BranchOnSmi(probe, not_found);
+      __ JumpIfSmi(probe, not_found);
       __ sub(scratch2, object, Operand(kHeapObjectTag));
       __ vldr(d0, scratch2, HeapNumber::kValueOffset);
       __ sub(probe, probe, Operand(kHeapObjectTag));
@@ -938,7 +1088,7 @@
   } else if (FLAG_debug_code) {
     __ orr(r2, r1, r0);
     __ tst(r2, Operand(kSmiTagMask));
-    __ Assert(nz, "CompareStub: unexpected smi operands.");
+    __ Assert(ne, "CompareStub: unexpected smi operands.");
   }
 
   // NOTICE! This code is only reached after a smi-fast-case check, so
@@ -1375,7 +1525,7 @@
         __ sub(r0, r5, Operand(kHeapObjectTag));
         __ vstr(d5, r0, HeapNumber::kValueOffset);
         __ add(r0, r0, Operand(kHeapObjectTag));
-        __ mov(pc, lr);
+        __ Ret();
       } else {
         // If we did not inline the operation, then the arguments are in:
         // r0: Left value (least significant part of mantissa).
@@ -1960,7 +2110,7 @@
       Label not_smi;
       if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
         Label lhs_is_unsuitable;
-        __ BranchOnNotSmi(lhs, &not_smi);
+        __ JumpIfNotSmi(lhs, &not_smi);
         if (IsPowerOf2(constant_rhs_)) {
           if (op_ == Token::MOD) {
             __ and_(rhs,
@@ -2207,8 +2357,467 @@
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
     TRBinaryOpIC::TypeInfo type_info,
     TRBinaryOpIC::TypeInfo result_type_info) {
+  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+  return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  __ Push(r1, r0);
+
+  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+  __ mov(r1, Operand(Smi::FromInt(op_)));
+  __ mov(r0, Operand(Smi::FromInt(operands_type_)));
+  __ Push(r2, r1, r0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+      5,
+      1);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+    MacroAssembler* masm) {
   UNIMPLEMENTED();
-  return Handle<Code>::null();
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operands_type_) {
+    case TRBinaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case TRBinaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case TRBinaryOpIC::INT32:
+      GenerateInt32Stub(masm);
+      break;
+    case TRBinaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case TRBinaryOpIC::STRING:
+      GenerateStringStub(masm);
+      break;
+    case TRBinaryOpIC::GENERIC:
+      GenerateGeneric(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "TypeRecordingBinaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               TRBinaryOpIC::GetName(operands_type_));
+  return name_;
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
+    MacroAssembler* masm) {
+  Register left = r1;
+  Register right = r0;
+  Register scratch1 = r7;
+  Register scratch2 = r9;
+
+  ASSERT(right.is(r0));
+  STATIC_ASSERT(kSmiTag == 0);
+
+  Label not_smi_result;
+  switch (op_) {
+    case Token::ADD:
+      __ add(right, left, Operand(right), SetCC);  // Add optimistically.
+      __ Ret(vc);
+      __ sub(right, right, Operand(left));  // Revert optimistic add.
+      break;
+    case Token::SUB:
+      __ sub(right, left, Operand(right), SetCC);  // Subtract optimistically.
+      __ Ret(vc);
+      __ sub(right, left, Operand(right));  // Revert optimistic subtract.
+      break;
+    case Token::MUL:
+      // Remove tag from one of the operands. This way the multiplication result
+      // will be a smi if it fits the smi range.
+      __ SmiUntag(ip, right);
+      // Do multiplication
+      // scratch1 = lower 32 bits of ip * left.
+      // scratch2 = higher 32 bits of ip * left.
+      __ smull(scratch1, scratch2, left, ip);
+      // Check for overflowing the smi range - no overflow if higher 33 bits of
+      // the result are identical.
+      __ mov(ip, Operand(scratch1, ASR, 31));
+      __ cmp(ip, Operand(scratch2));
+      __ b(ne, &not_smi_result);
+      // Go slow on zero result to handle -0.
+      __ tst(scratch1, Operand(scratch1));
+      __ mov(right, Operand(scratch1), LeaveCC, ne);
+      __ Ret(ne);
+      // We need -0 if we were multiplying a negative number with 0 to get 0.
+      // We know one of them was zero.
+      __ add(scratch2, right, Operand(left), SetCC);
+      __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+      __ Ret(pl);  // Return smi 0 if the non-zero one was positive.
+      // We fall through here if we multiplied a negative number with 0, because
+      // that would mean we should produce -0.
+      break;
+    default:
+      UNREACHABLE();
+  }
+  __ bind(&not_smi_result);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateVFPOperation(
+    MacroAssembler* masm) {
+  switch (op_) {
+    case Token::ADD:
+      __ vadd(d5, d6, d7);
+      break;
+    case Token::SUB:
+      __ vsub(d5, d6, d7);
+      break;
+    case Token::MUL:
+      __ vmul(d5, d6, d7);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// Generate the smi code. If the operation on smis is successful this return is
+// generated. If the result is not a smi and heap number allocation is not
+// requested the code falls through. If number allocation is requested but a
+// heap number cannot be allocated the code jumps to the label gc_required.
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+    Label* gc_required,
+    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+  Label not_smis;
+
+  ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+  Register left = r1;
+  Register right = r0;
+  Register scratch1 = r7;
+  Register scratch2 = r9;
+
+  // Perform combined smi check on both operands.
+  __ orr(scratch1, left, Operand(right));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ tst(scratch1, Operand(kSmiTagMask));
+  __ b(ne, &not_smis);
+
+  GenerateSmiSmiOperation(masm);
+
+  // If heap number results are possible generate the result in an allocated
+  // heap number.
+  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+    FloatingPointHelper::Destination destination =
+        CpuFeatures::IsSupported(VFP3) && Token::MOD != op_ ?
+        FloatingPointHelper::kVFPRegisters :
+        FloatingPointHelper::kCoreRegisters;
+
+    Register heap_number_map = r6;
+    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+    // Allocate new heap number for result.
+    Register heap_number = r5;
+    __ AllocateHeapNumber(
+        heap_number, scratch1, scratch2, heap_number_map, gc_required);
+
+    // Load the smis.
+    FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+
+    // Calculate the result.
+    if (destination == FloatingPointHelper::kVFPRegisters) {
+      // Using VFP registers:
+      // d6: Left value
+      // d7: Right value
+      CpuFeatures::Scope scope(VFP3);
+      GenerateVFPOperation(masm);
+
+      __ sub(r0, heap_number, Operand(kHeapObjectTag));
+      __ vstr(d5, r0, HeapNumber::kValueOffset);
+      __ add(r0, r0, Operand(kHeapObjectTag));
+      __ Ret();
+    } else {
+      // Using core registers:
+      // r0: Left value (least significant part of mantissa).
+      // r1: Left value (sign, exponent, top of mantissa).
+      // r2: Right value (least significant part of mantissa).
+      // r3: Right value (sign, exponent, top of mantissa).
+
+      __ push(lr);  // For later.
+      __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
+      // Call C routine that may not cause GC or other trouble. r5 is callee
+      // save.
+      __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+      // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+      // Double returned in fp coprocessor register 0 and 1, encoded as
+      // register cr8.  Offsets must be divisible by 4 for coprocessor so we
+      // need to subtract the tag from r5.
+      __ sub(scratch1, heap_number, Operand(kHeapObjectTag));
+      __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+#else
+      // Double returned in registers 0 and 1.
+      __ Strd(r0, r1, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
+#endif
+      __ mov(r0, Operand(heap_number));
+      // And we are done.
+      __ pop(pc);
+    }
+  }
+  __ bind(&not_smis);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  Label not_smis, call_runtime;
+
+  ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+      result_type_ == TRBinaryOpIC::SMI) {
+    // Only allow smi results.
+    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+  } else {
+    // Allow heap number result and don't make a transition if a heap number
+    // cannot be allocated.
+    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  }
+
+  // Code falls through if the result is not returned as either a smi or heap
+  // number.
+  GenerateTypeTransition(masm);
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+  ASSERT(op_ == Token::ADD);
+  // Try to add arguments as strings, otherwise, transition to the generic
+  // TRBinaryOpIC type.
+  GenerateAddStrings(masm);
+  GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+
+  GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+  Register scratch1 = r7;
+  Register scratch2 = r9;
+
+  Label not_number, call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
+
+  Register heap_number_map = r6;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending on
+  // whether VFP3 is available.
+  FloatingPointHelper::Destination destination =
+      CpuFeatures::IsSupported(VFP3) ?
+      FloatingPointHelper::kVFPRegisters :
+      FloatingPointHelper::kCoreRegisters;
+  FloatingPointHelper::LoadOperands(masm,
+                                    destination,
+                                    heap_number_map,
+                                    scratch1,
+                                    scratch2,
+                                    &not_number);
+  if (destination == FloatingPointHelper::kVFPRegisters) {
+    // Use floating point instructions for the binary operation.
+    CpuFeatures::Scope scope(VFP3);
+    GenerateVFPOperation(masm);
+
+    // Get a heap number object for the result - might be left or right if one
+    // of these are overwritable.
+    GenerateHeapResultAllocation(
+        masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
+
+    // Fill the result into the allocated heap number and return.
+    __ sub(r0, r4, Operand(kHeapObjectTag));
+    __ vstr(d5, r0, HeapNumber::kValueOffset);
+    __ add(r0, r0, Operand(kHeapObjectTag));
+    __ Ret();
+
+  } else {
+    // Call a C function for the binary operation.
+    // r0/r1: Left operand
+    // r2/r3: Right operand
+
+    // Get a heap number object for the result - might be left or right if one
+    // of these are overwritable. Uses a callee-save register to keep the value
+    // across the c call.
+    GenerateHeapResultAllocation(
+        masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
+
+    __ push(lr);  // For returning later (no GC after this point).
+    __ PrepareCallCFunction(4, scratch1);  // Two doubles count as 4 arguments.
+    // Call C routine that may not cause GC or other trouble. r4 is callee
+    // saved.
+    __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+
+    // Fill the result into the allocated heap number.
+  #if !defined(USE_ARM_EABI)
+    // Double returned in fp coprocessor register 0 and 1, encoded as
+    // register cr8.  Offsets must be divisible by 4 for coprocessor so we
+    // need to subtract the tag from r5.
+    __ sub(scratch1, r4, Operand(kHeapObjectTag));
+    __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+  #else
+    // Double returned in registers 0 and 1.
+    __ Strd(r0, r1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+  #endif
+    __ mov(r0, Operand(r4));
+    __ pop(pc);  // Return to the pushed lr.
+  }
+
+  __ bind(&not_number);
+  GenerateTypeTransition(masm);
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+  ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+  Label call_runtime;
+
+  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+  // If all else fails, use the runtime system to get the correct
+  // result.
+  __ bind(&call_runtime);
+
+  // Try to add strings before calling runtime.
+  if (op_ == Token::ADD) {
+    GenerateAddStrings(masm);
+  }
+
+  GenericBinaryOpStub stub(op_, mode_, r1, r0);
+  __ TailCallStub(&stub);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+  ASSERT(op_ == Token::ADD);
+
+  Register left = r1;
+  Register right = r0;
+  Label call_runtime;
+
+  // Check if first argument is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  // First argument is a string, test second.
+  __ JumpIfSmi(right, &call_runtime);
+  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  // First and second argument are strings.
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  // At least one argument is not a string.
+  __ bind(&call_runtime);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+  GenerateRegisterArgsPush(masm);
+  switch (op_) {
+    case Token::ADD:
+      __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
+      break;
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+    MacroAssembler* masm,
+    Register result,
+    Register heap_number_map,
+    Register scratch1,
+    Register scratch2,
+    Label* gc_required) {
+
+  // Code below will scratch result if allocation fails. To keep both arguments
+  // intact for the runtime call result cannot be one of these.
+  ASSERT(!result.is(r0) && !result.is(r1));
+
+  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+    Label skip_allocation, allocated;
+    Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
+    // If the overwritable operand is already an object, we skip the
+    // allocation of a heap number.
+    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+    // Allocate a heap number for the result.
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+    __ b(&allocated);
+    __ bind(&skip_allocation);
+    // Use object holding the overwritable operand for result.
+    __ mov(result, Operand(overwritable_operand));
+    __ bind(&allocated);
+  } else {
+    ASSERT(mode_ == NO_OVERWRITE);
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  __ Push(r1, r0);
 }
 
 
@@ -2220,7 +2829,7 @@
 
   if (CpuFeatures::IsSupported(VFP3)) {
     // Load argument and check if it is a smi.
-    __ BranchOnNotSmi(r0, &input_not_smi);
+    __ JumpIfNotSmi(r0, &input_not_smi);
 
     CpuFeatures::Scope scope(VFP3);
     // Input is a smi. Convert to double and load the low and high words
@@ -2374,7 +2983,7 @@
   } else if (op_ == Token::BIT_NOT) {
     if (include_smi_code_) {
       Label non_smi;
-      __ BranchOnNotSmi(r0, &non_smi);
+      __ JumpIfNotSmi(r0, &non_smi);
       __ mvn(r0, Operand(r0));
       // Bit-clear inverted smi-tag.
       __ bic(r0, r0, Operand(kSmiTagMask));
@@ -2591,7 +3200,7 @@
     if (frame_alignment > kPointerSize) {
       Label alignment_as_expected;
       ASSERT(IsPowerOf2(frame_alignment));
-      __ tst(r2, Operand(frame_alignment_mask));
+      __ tst(sp, Operand(frame_alignment_mask));
       __ b(eq, &alignment_as_expected);
       // Don't use Check here, as it will call Runtime_Abort re-entering here.
       __ stop("Unexpected alignment");
@@ -2903,7 +3512,7 @@
   }
 
   // Check that the left hand is a JS object and load map.
-  __ BranchOnSmi(object, &not_js_object);
+  __ JumpIfSmi(object, &not_js_object);
   __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
 
   // If there is a call site cache don't look in the global cache, but do the
@@ -2926,7 +3535,7 @@
   __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
 
   // Check that the function prototype is a JS object.
-  __ BranchOnSmi(prototype, &slow);
+  __ JumpIfSmi(prototype, &slow);
   __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
 
   // Update the global instanceof or call site inlined cache with the current
@@ -3006,7 +3615,7 @@
   __ bind(&not_js_object);
   // Before null, smi and string value checks, check that the rhs is a function
   // as for a non-function rhs an exception needs to be thrown.
-  __ BranchOnSmi(function, &slow);
+  __ JumpIfSmi(function, &slow);
   __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
   __ b(ne, &slow);
 
@@ -3018,7 +3627,7 @@
 
   __ bind(&object_not_null);
   // Smi values are not instances of anything.
-  __ BranchOnNotSmi(object, &object_not_null_or_smi);
+  __ JumpIfNotSmi(object, &object_not_null_or_smi);
   __ mov(r0, Operand(Smi::FromInt(1)));
   __ Ret(HasArgsInRegisters() ? 0 : 2);
 
@@ -3062,7 +3671,7 @@
 
   // Check that the key is a smi.
   Label slow;
-  __ BranchOnNotSmi(r1, &slow);
+  __ JumpIfNotSmi(r1, &slow);
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor;
@@ -3266,7 +3875,7 @@
   __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
   if (FLAG_debug_code) {
     __ tst(regexp_data, Operand(kSmiTagMask));
-    __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
+    __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
     __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
     __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
   }
@@ -3369,7 +3978,7 @@
   // Is first part a flat string?
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r0, Operand(kStringRepresentationMask));
-  __ b(nz, &runtime);
+  __ b(ne, &runtime);
 
   __ bind(&seq_string);
   // subject: Subject string
@@ -3644,7 +4253,7 @@
     __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
 
     // Check if receiver is a smi (which is a number value).
-    __ BranchOnSmi(r1, &receiver_is_value);
+    __ JumpIfSmi(r1, &receiver_is_value);
 
     // Check if the receiver is a valid JS object.
     __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
@@ -3667,7 +4276,7 @@
 
   // Check that the function is really a JavaScript function.
   // r1: pushed function (to be verified)
-  __ BranchOnSmi(r1, &slow);
+  __ JumpIfSmi(r1, &slow);
   // Get the map of the function object.
   __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
   __ b(ne, &slow);
@@ -3771,7 +4380,7 @@
   Label got_char_code;
 
   // If the receiver is a smi trigger the non-string case.
-  __ BranchOnSmi(object_, receiver_not_string_);
+  __ JumpIfSmi(object_, receiver_not_string_);
 
   // Fetch the instance type of the receiver into result register.
   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
@@ -3781,7 +4390,7 @@
   __ b(ne, receiver_not_string_);
 
   // If the index is non-smi trigger the non-smi case.
-  __ BranchOnNotSmi(index_, &index_not_smi_);
+  __ JumpIfNotSmi(index_, &index_not_smi_);
 
   // Put smi-tagged index into scratch register.
   __ mov(scratch_, index_);
@@ -3817,13 +4426,13 @@
   // If the first cons component is also non-flat, then go to runtime.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result_, Operand(kStringRepresentationMask));
-  __ b(nz, &call_runtime_);
+  __ b(ne, &call_runtime_);
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
   STATIC_ASSERT(kAsciiStringTag != 0);
   __ tst(result_, Operand(kStringEncodingMask));
-  __ b(nz, &ascii_string);
+  __ b(ne, &ascii_string);
 
   // 2-byte string.
   // Load the 2-byte character code into the result register. We can
@@ -3878,7 +4487,7 @@
   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ BranchOnNotSmi(scratch_, index_out_of_range_);
+  __ JumpIfNotSmi(scratch_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -3908,7 +4517,7 @@
   __ tst(code_,
          Operand(kSmiTagMask |
                  ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
-  __ b(nz, &slow_case_);
+  __ b(ne, &slow_case_);
 
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged ascii char code.
@@ -4355,7 +4964,7 @@
   __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
 
   // if (hash == 0) hash = 27;
-  __ mov(hash, Operand(27), LeaveCC, nz);
+  __ mov(hash, Operand(27), LeaveCC, ne);
 }
 
 
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 9fa8687..a79b239 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -218,6 +218,117 @@
 };
 
 
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(TRBinaryOpIC::UNINITIALIZED),
+        result_type_(TRBinaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+    use_vfp3_ = CpuFeatures::IsSupported(VFP3);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  TypeRecordingBinaryOpStub(
+      int key,
+      TRBinaryOpIC::TypeInfo operands_type,
+      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        use_vfp3_(VFP3Bits::decode(key)),
+        operands_type_(operands_type),
+        result_type_(result_type),
+        name_(NULL) { }
+
+ private:
+  enum SmiCodeGenerateHeapNumberResults {
+    ALLOW_HEAPNUMBER_RESULTS,
+    NO_HEAPNUMBER_RESULTS
+  };
+
+  Token::Value op_;
+  OverwriteMode mode_;
+  bool use_vfp3_;
+
+  // Operand type information determined at runtime.
+  TRBinaryOpIC::TypeInfo operands_type_;
+  TRBinaryOpIC::TypeInfo result_type_;
+
+  char* name_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           TRBinaryOpIC::GetName(operands_type_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 7> {};
+  class VFP3Bits: public BitField<bool, 9, 1> {};
+  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+
+  Major MajorKey() { return TypeRecordingBinaryOp; }
+  int MinorKey() {
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | VFP3Bits::encode(use_vfp3_)
+           | OperandTypeInfoBits::encode(operands_type_)
+           | ResultTypeInfoBits::encode(result_type_);
+  }
+
+  void Generate(MacroAssembler* masm);
+  void GenerateGeneric(MacroAssembler* masm);
+  void GenerateSmiSmiOperation(MacroAssembler* masm);
+  void GenerateVFPOperation(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm,
+                       Label* gc_required,
+                       SmiCodeGenerateHeapNumberResults heapnumber_results);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+  void GenerateUninitializedStub(MacroAssembler* masm);
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateInt32Stub(MacroAssembler* masm);
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateStringStub(MacroAssembler* masm);
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateAddStrings(MacroAssembler* masm);
+  void GenerateCallRuntime(MacroAssembler* masm);
+
+  void GenerateHeapResultAllocation(MacroAssembler* masm,
+                                    Register result,
+                                    Register heap_number_map,
+                                    Register scratch1,
+                                    Register scratch2,
+                                    Label* gc_required);
+  void GenerateRegisterArgsPush(MacroAssembler* masm);
+  void GenerateTypeTransition(MacroAssembler* masm);
+  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return TRBinaryOpIC::ToState(operands_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_type_recording_binary_op_type(operands_type_);
+    code->set_type_recording_binary_op_result_type(result_type_);
+  }
+
+  friend class CodeGenerator;
+};
+
+
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
index 264498d..81ed2d0 100644
--- a/src/arm/codegen-arm-inl.h
+++ b/src/arm/codegen-arm-inl.h
@@ -39,7 +39,7 @@
 // Platform-specific inline functions.
 
 void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
 
 #undef __
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index f76bd35..0d429d6 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1589,7 +1589,7 @@
 }
 
 
-void CodeGenerator::Comparison(Condition cc,
+void CodeGenerator::Comparison(Condition cond,
                                Expression* left,
                                Expression* right,
                                bool strict) {
@@ -1603,7 +1603,7 @@
   // result : cc register
 
   // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cc == eq);
+  ASSERT(!strict || cond == eq);
 
   Register lhs;
   Register rhs;
@@ -1614,8 +1614,8 @@
   // We load the top two stack positions into registers chosen by the virtual
   // frame.  This should keep the register shuffling to a minimum.
   // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
-  if (cc == gt || cc == le) {
-    cc = ReverseCondition(cc);
+  if (cond == gt || cond == le) {
+    cond = ReverseCondition(cond);
     lhs_is_smi = frame_->KnownSmiAt(0);
     rhs_is_smi = frame_->KnownSmiAt(1);
     lhs = frame_->PopToRegister();
@@ -1655,7 +1655,7 @@
     // Perform non-smi comparison by stub.
     // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
     // We call with 0 args because there are 0 on the stack.
-    CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
+    CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
     frame_->CallStub(&stub, 0);
     __ cmp(r0, Operand(0, RelocInfo::NONE));
     exit.Jump();
@@ -1667,7 +1667,7 @@
   __ cmp(lhs, Operand(rhs));
 
   exit.Bind();
-  cc_reg_ = cc;
+  cc_reg_ = cond;
 }
 
 
@@ -1762,7 +1762,7 @@
   //   sp[2]: applicand.
 
   // Check that the receiver really is a JavaScript object.
-  __ BranchOnSmi(receiver_reg, &build_args);
+  __ JumpIfSmi(receiver_reg, &build_args);
   // We allow all JSObjects including JSFunctions.  As long as
   // JS_FUNCTION_TYPE is the last instance type and it is right
   // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
@@ -1774,7 +1774,7 @@
 
   // Check that applicand.apply is Function.prototype.apply.
   __ ldr(r0, MemOperand(sp, kPointerSize));
-  __ BranchOnSmi(r0, &build_args);
+  __ JumpIfSmi(r0, &build_args);
   __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
   __ b(ne, &build_args);
   Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
@@ -1785,7 +1785,7 @@
 
   // Check that applicand is a function.
   __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ BranchOnSmi(r1, &build_args);
+  __ JumpIfSmi(r1, &build_args);
   __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
   __ b(ne, &build_args);
 
@@ -1885,8 +1885,8 @@
 
 void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
   ASSERT(has_cc());
-  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
-  target->Branch(cc);
+  Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
+  target->Branch(cond);
   cc_reg_ = al;
 }
 
@@ -4618,8 +4618,8 @@
     ASSERT(runtime.entry_frame() == NULL);
     runtime.set_entry_frame(frame_);
 
-    __ BranchOnNotSmi(exponent, &exponent_nonsmi);
-    __ BranchOnNotSmi(base, &base_nonsmi);
+    __ JumpIfNotSmi(exponent, &exponent_nonsmi);
+    __ JumpIfNotSmi(base, &base_nonsmi);
 
     heap_number_map = r6;
     __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
@@ -5572,7 +5572,7 @@
   deferred->Branch(lt);
   __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
   __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
-  deferred->Branch(nz);
+  deferred->Branch(ne);
 
   // Check the object's elements are in fast case and writable.
   __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
@@ -5589,7 +5589,7 @@
   __ mov(tmp2, index1);
   __ orr(tmp2, tmp2, index2);
   __ tst(tmp2, Operand(kSmiTagMask));
-  deferred->Branch(nz);
+  deferred->Branch(ne);
 
   // Check that both indices are valid.
   __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 3df7b4e..bf9da23 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -32,12 +32,10 @@
 #include "constants-arm.h"
 
 
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
 
-namespace v8i = v8::internal;
-
-double Instr::DoubleImmedVmov() const {
+double Instruction::DoubleImmedVmov() const {
   // Reconstruct a double from the immediate encoded in the vmov instruction.
   //
   //   instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
@@ -149,6 +147,6 @@
 }
 
 
-} }  // namespace assembler::arm
+} }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index ff81447..7502ef0 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -86,8 +86,8 @@
 #define USE_BLX 1
 #endif
 
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
 
 // Number of registers in normal ARM mode.
 static const int kNumRegisters = 16;
@@ -102,6 +102,9 @@
 static const int kPCRegister = 15;
 static const int kNoRegister = -1;
 
+// -----------------------------------------------------------------------------
+// Conditions.
+
 // Defines constants and accessor classes to assemble, disassemble and
 // simulate ARM instructions.
 //
@@ -111,93 +114,262 @@
 // Constants for specific fields are defined in their respective named enums.
 // General constants are in an anonymous enum in class Instr.
 
-typedef unsigned char byte;
-
 // Values for the condition field as defined in section A3.2
 enum Condition {
-  no_condition = -1,
-  EQ =  0,  // equal
-  NE =  1,  // not equal
-  CS =  2,  // carry set/unsigned higher or same
-  CC =  3,  // carry clear/unsigned lower
-  MI =  4,  // minus/negative
-  PL =  5,  // plus/positive or zero
-  VS =  6,  // overflow
-  VC =  7,  // no overflow
-  HI =  8,  // unsigned higher
-  LS =  9,  // unsigned lower or same
-  GE = 10,  // signed greater than or equal
-  LT = 11,  // signed less than
-  GT = 12,  // signed greater than
-  LE = 13,  // signed less than or equal
-  AL = 14,  // always (unconditional)
-  special_condition = 15,  // special condition (refer to section A3.2.1)
-  max_condition = 16
+  kNoCondition = -1,
+
+  eq =  0 << 28,                 // Z set            Equal.
+  ne =  1 << 28,                 // Z clear          Not equal.
+  cs =  2 << 28,                 // C set            Unsigned higher or same.
+  cc =  3 << 28,                 // C clear          Unsigned lower.
+  mi =  4 << 28,                 // N set            Negative.
+  pl =  5 << 28,                 // N clear          Positive or zero.
+  vs =  6 << 28,                 // V set            Overflow.
+  vc =  7 << 28,                 // V clear          No overflow.
+  hi =  8 << 28,                 // C set, Z clear   Unsigned higher.
+  ls =  9 << 28,                 // C clear or Z set Unsigned lower or same.
+  ge = 10 << 28,                 // N == V           Greater or equal.
+  lt = 11 << 28,                 // N != V           Less than.
+  gt = 12 << 28,                 // Z clear, N == V  Greater than.
+  le = 13 << 28,                 // Z set or N != V  Less than or equal.
+  al = 14 << 28,                 //                  Always.
+
+  kSpecialCondition = 15 << 28,  // Special condition (refer to section A3.2.1).
+  kNumberOfConditions = 16,
+
+  // Aliases.
+  hs = cs,                       // C set            Unsigned higher or same.
+  lo = cc                        // C clear          Unsigned lower.
 };
 
 
+inline Condition NegateCondition(Condition cond) {
+  ASSERT(cond != al);
+  return static_cast<Condition>(cond ^ ne);
+}
+
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cond) {
+  switch (cond) {
+    case lo:
+      return hi;
+    case hi:
+      return lo;
+    case hs:
+      return ls;
+    case ls:
+      return hs;
+    case lt:
+      return gt;
+    case gt:
+      return lt;
+    case ge:
+      return le;
+    case le:
+      return ge;
+    default:
+      return cond;
+  };
+}
+
+
+// -----------------------------------------------------------------------------
+// Instructions encoding.
+
+// Instr is merely used by the Assembler to distinguish 32bit integers
+// representing instructions from usual 32 bit values.
+// Instruction objects are pointers to 32bit values, and provide methods to
+// access the various ISA fields.
+typedef int32_t Instr;
+
+
 // Opcodes for Data-processing instructions (instructions with a type 0 and 1)
 // as defined in section A3.4
 enum Opcode {
-  no_operand = -1,
-  AND =  0,  // Logical AND
-  EOR =  1,  // Logical Exclusive OR
-  SUB =  2,  // Subtract
-  RSB =  3,  // Reverse Subtract
-  ADD =  4,  // Add
-  ADC =  5,  // Add with Carry
-  SBC =  6,  // Subtract with Carry
-  RSC =  7,  // Reverse Subtract with Carry
-  TST =  8,  // Test
-  TEQ =  9,  // Test Equivalence
-  CMP = 10,  // Compare
-  CMN = 11,  // Compare Negated
-  ORR = 12,  // Logical (inclusive) OR
-  MOV = 13,  // Move
-  BIC = 14,  // Bit Clear
-  MVN = 15,  // Move Not
-  max_operand = 16
+  AND =  0 << 21,  // Logical AND.
+  EOR =  1 << 21,  // Logical Exclusive OR.
+  SUB =  2 << 21,  // Subtract.
+  RSB =  3 << 21,  // Reverse Subtract.
+  ADD =  4 << 21,  // Add.
+  ADC =  5 << 21,  // Add with Carry.
+  SBC =  6 << 21,  // Subtract with Carry.
+  RSC =  7 << 21,  // Reverse Subtract with Carry.
+  TST =  8 << 21,  // Test.
+  TEQ =  9 << 21,  // Test Equivalence.
+  CMP = 10 << 21,  // Compare.
+  CMN = 11 << 21,  // Compare Negated.
+  ORR = 12 << 21,  // Logical (inclusive) OR.
+  MOV = 13 << 21,  // Move.
+  BIC = 14 << 21,  // Bit Clear.
+  MVN = 15 << 21   // Move Not.
 };
 
 
 // The bits for bit 7-4 for some type 0 miscellaneous instructions.
 enum MiscInstructionsBits74 {
   // With bits 22-21 01.
-  BX   =  1,
-  BXJ  =  2,
-  BLX  =  3,
-  BKPT =  7,
+  BX   =  1 << 4,
+  BXJ  =  2 << 4,
+  BLX  =  3 << 4,
+  BKPT =  7 << 4,
 
   // With bits 22-21 11.
-  CLZ  =  1
+  CLZ  =  1 << 4
+};
+
+
+// Instruction encoding bits and masks.
+enum {
+  H   = 1 << 5,   // Halfword (or byte).
+  S6  = 1 << 6,   // Signed (or unsigned).
+  L   = 1 << 20,  // Load (or store).
+  S   = 1 << 20,  // Set condition code (or leave unchanged).
+  W   = 1 << 21,  // Writeback base register (or leave unchanged).
+  A   = 1 << 21,  // Accumulate in multiply instruction (or not).
+  B   = 1 << 22,  // Unsigned byte (or word).
+  N   = 1 << 22,  // Long (or short).
+  U   = 1 << 23,  // Positive (or negative) offset/index.
+  P   = 1 << 24,  // Offset/pre-indexed addressing (or post-indexed addressing).
+  I   = 1 << 25,  // Immediate shifter operand (or not).
+
+  B4  = 1 << 4,
+  B5  = 1 << 5,
+  B6  = 1 << 6,
+  B7  = 1 << 7,
+  B8  = 1 << 8,
+  B9  = 1 << 9,
+  B12 = 1 << 12,
+  B16 = 1 << 16,
+  B18 = 1 << 18,
+  B19 = 1 << 19,
+  B20 = 1 << 20,
+  B21 = 1 << 21,
+  B22 = 1 << 22,
+  B23 = 1 << 23,
+  B24 = 1 << 24,
+  B25 = 1 << 25,
+  B26 = 1 << 26,
+  B27 = 1 << 27,
+  B28 = 1 << 28,
+
+  // Instruction bit masks.
+  kCondMask   = 15 << 28,
+  kALUMask    = 0x6f << 21,
+  kRdMask     = 15 << 12,  // In str instruction.
+  kCoprocessorMask = 15 << 8,
+  kOpCodeMask = 15 << 21,  // In data-processing instructions.
+  kImm24Mask  = (1 << 24) - 1,
+  kOff12Mask  = (1 << 12) - 1
+};
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants.
+
+// Condition code updating mode.
+enum SBit {
+  SetCC   = 1 << 20,  // Set condition code.
+  LeaveCC = 0 << 20   // Leave condition code unchanged.
+};
+
+
+// Status register selection.
+enum SRegister {
+  CPSR = 0 << 22,
+  SPSR = 1 << 22
 };
 
 
 // Shifter types for Data-processing operands as defined in section A5.1.2.
-enum Shift {
-  no_shift = -1,
-  LSL = 0,  // Logical shift left
-  LSR = 1,  // Logical shift right
-  ASR = 2,  // Arithmetic shift right
-  ROR = 3,  // Rotate right
-  max_shift = 4
+enum ShiftOp {
+  LSL = 0 << 5,   // Logical shift left.
+  LSR = 1 << 5,   // Logical shift right.
+  ASR = 2 << 5,   // Arithmetic shift right.
+  ROR = 3 << 5,   // Rotate right.
+
+  // RRX is encoded as ROR with shift_imm == 0.
+  // Use a special code to make the distinction. The RRX ShiftOp is only used
+  // as an argument, and will never actually be encoded. The Assembler will
+  // detect it and emit the correct ROR shift operand with shift_imm == 0.
+  RRX = -1,
+  kNumberOfShifts = 4
 };
 
 
+// Status register fields.
+enum SRegisterField {
+  CPSR_c = CPSR | 1 << 16,
+  CPSR_x = CPSR | 1 << 17,
+  CPSR_s = CPSR | 1 << 18,
+  CPSR_f = CPSR | 1 << 19,
+  SPSR_c = SPSR | 1 << 16,
+  SPSR_x = SPSR | 1 << 17,
+  SPSR_s = SPSR | 1 << 18,
+  SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values).
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode.
+enum AddrMode {
+  // Bit encoding P U W.
+  Offset       = (8|4|0) << 21,  // Offset (without writeback to base).
+  PreIndex     = (8|4|1) << 21,  // Pre-indexed addressing with writeback.
+  PostIndex    = (0|4|0) << 21,  // Post-indexed addressing with writeback.
+  NegOffset    = (8|0|0) << 21,  // Negative offset (without writeback to base).
+  NegPreIndex  = (8|0|1) << 21,  // Negative pre-indexed with writeback.
+  NegPostIndex = (0|0|0) << 21   // Negative post-indexed with writeback.
+};
+
+
+// Load/store multiple addressing mode.
+enum BlockAddrMode {
+  // Bit encoding P U W .
+  da           = (0|0|0) << 21,  // Decrement after.
+  ia           = (0|4|0) << 21,  // Increment after.
+  db           = (8|0|0) << 21,  // Decrement before.
+  ib           = (8|4|0) << 21,  // Increment before.
+  da_w         = (0|0|1) << 21,  // Decrement after with writeback to base.
+  ia_w         = (0|4|1) << 21,  // Increment after with writeback to base.
+  db_w         = (8|0|1) << 21,  // Decrement before with writeback to base.
+  ib_w         = (8|4|1) << 21,  // Increment before with writeback to base.
+
+  // Alias modes for comparison when writeback does not matter.
+  da_x         = (0|0|0) << 21,  // Decrement after.
+  ia_x         = (0|4|0) << 21,  // Increment after.
+  db_x         = (8|0|0) << 21,  // Decrement before.
+  ib_x         = (8|4|0) << 21   // Increment before.
+};
+
+
+// Coprocessor load/store operand size.
+enum LFlag {
+  Long  = 1 << 22,  // Long load/store coprocessor.
+  Short = 0 << 22   // Short load/store coprocessor.
+};
+
+
+// -----------------------------------------------------------------------------
+// Supervisor Call (svc) specific support.
+
 // Special Software Interrupt codes when used in the presence of the ARM
 // simulator.
 // svc (formerly swi) provides a 24bit immediate value. Use bits 22:0 for
 // standard SoftwareInterrupCode. Bit 23 is reserved for the stop feature.
 enum SoftwareInterruptCodes {
   // transition to C code
-  call_rt_redirected = 0x10,
+  kCallRtRedirected= 0x10,
   // break point
-  break_point = 0x20,
+  kBreakpoint= 0x20,
   // stop
-  stop = 1 << 23
+  kStopCode = 1 << 23
 };
-static const int32_t kStopCodeMask = stop - 1;
-static const uint32_t kMaxStopCode = stop - 1;
+static const uint32_t kStopCodeMask = kStopCode - 1;
+static const uint32_t kMaxStopCode = kStopCode - 1;
+static const int32_t  kDefaultStopCode = -1;
 
 
 // Type of VFP register. Determines register encoding.
@@ -206,6 +378,20 @@
   kDoublePrecision = 1
 };
 
+
+// VFP FPSCR constants.
+static const uint32_t kVFPExceptionMask = 0xf;
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
+static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
+static const uint32_t kVFPInvalidExceptionBit = 1;
+
+static const uint32_t kVFPNConditionFlagBit = 1 << 31;
+static const uint32_t kVFPZConditionFlagBit = 1 << 30;
+static const uint32_t kVFPCConditionFlagBit = 1 << 29;
+static const uint32_t kVFPVConditionFlagBit = 1 << 28;
+
+
 // VFP rounding modes. See ARM DDI 0406B Page A2-29.
 enum FPSCRRoundingModes {
   RN,   // Round to Nearest.
@@ -214,22 +400,91 @@
   RZ    // Round towards zero.
 };
 
-typedef int32_t instr_t;
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the ARM.  They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM.  Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
 
 
-// The class Instr enables access to individual fields defined in the ARM
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-arm.cc, as they use named registers
+// and other constants.
+
+
+// add(sp, sp, 4) instruction (aka Pop())
+extern const Instr kPopInstruction;
+
+// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
+// register r is not encoded.
+extern const Instr kPushRegPattern;
+
+// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
+// register r is not encoded.
+extern const Instr kPopRegPattern;
+
+// mov lr, pc
+extern const Instr kMovLrPc;
+// ldr rd, [pc, #offset]
+extern const Instr kLdrPCMask;
+extern const Instr kLdrPCPattern;
+// blxcc rm
+extern const Instr kBlxRegMask;
+
+extern const Instr kBlxRegPattern;
+
+extern const Instr kMovMvnMask;
+extern const Instr kMovMvnPattern;
+extern const Instr kMovMvnFlip;
+extern const Instr kMovLeaveCCMask;
+extern const Instr kMovLeaveCCPattern;
+extern const Instr kMovwMask;
+extern const Instr kMovwPattern;
+extern const Instr kMovwLeaveCCFlip;
+extern const Instr kCmpCmnMask;
+extern const Instr kCmpCmnPattern;
+extern const Instr kCmpCmnFlip;
+extern const Instr kAddSubFlip;
+extern const Instr kAndBicFlip;
+
+// A mask for the Rd register for push, pop, ldr, str instructions.
+extern const Instr kLdrRegFpOffsetPattern;
+
+extern const Instr kStrRegFpOffsetPattern;
+
+extern const Instr kLdrRegFpNegOffsetPattern;
+
+extern const Instr kStrRegFpNegOffsetPattern;
+
+extern const Instr kLdrStrInstrTypeMask;
+extern const Instr kLdrStrInstrArgumentMask;
+extern const Instr kLdrStrOffsetMask;
+
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the ARM
 // architecture instruction set encoding as described in figure A3-1.
+// Note that the Assembler uses typedef int32_t Instr.
 //
 // Example: Test whether the instruction at ptr does set the condition code
 // bits.
 //
 // bool InstructionSetsConditionCodes(byte* ptr) {
-//   Instr* instr = Instr::At(ptr);
-//   int type = instr->TypeField();
+//   Instruction* instr = Instruction::At(ptr);
+//   int type = instr->TypeValue();
 //   return ((type == 0) || (type == 1)) && instr->HasS();
 // }
 //
-class Instr {
+class Instruction {
  public:
   enum {
     kInstrSize = 4,
@@ -237,14 +492,24 @@
     kPCReadOffset = 8
   };
 
+  // Helper macro to define static accessors.
+  // We use the cast to char* trick to bypass the strict anti-aliasing rules.
+  #define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name)                     \
+    static inline return_type Name(Instr instr) {                              \
+      char* temp = reinterpret_cast<char*>(&instr);                            \
+      return reinterpret_cast<Instruction*>(temp)->Name();                     \
+    }
+
+  #define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
+
   // Get the raw instruction bits.
-  inline instr_t InstructionBits() const {
-    return *reinterpret_cast<const instr_t*>(this);
+  inline Instr InstructionBits() const {
+    return *reinterpret_cast<const Instr*>(this);
   }
 
   // Set the raw instruction bits to value.
-  inline void SetInstructionBits(instr_t value) {
-    *reinterpret_cast<instr_t*>(this) = value;
+  inline void SetInstructionBits(Instr value) {
+    *reinterpret_cast<Instr*>(this) = value;
   }
 
   // Read one particular bit out of the instruction bits.
@@ -252,93 +517,141 @@
     return (InstructionBits() >> nr) & 1;
   }
 
-  // Read a bit field out of the instruction bits.
+  // Read a bit field's value out of the instruction bits.
   inline int Bits(int hi, int lo) const {
     return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
   }
 
+  // Read a bit field out of the instruction bits.
+  inline int BitField(int hi, int lo) const {
+    return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
+  }
+
+  // Static support.
+
+  // Read one particular bit out of the instruction bits.
+  static inline int Bit(Instr instr, int nr) {
+    return (instr >> nr) & 1;
+  }
+
+  // Read the value of a bit field out of the instruction bits.
+  static inline int Bits(Instr instr, int hi, int lo) {
+    return (instr >> lo) & ((2 << (hi - lo)) - 1);
+  }
+
+
+  // Read a bit field out of the instruction bits.
+  static inline int BitField(Instr instr, int hi, int lo) {
+    return instr & (((2 << (hi - lo)) - 1) << lo);
+  }
+
 
   // Accessors for the different named fields used in the ARM encoding.
   // The naming of these accessor corresponds to figure A3-1.
+  //
+  // Two kinds of accessors are declared:
+  // - <Name>Field() will return the raw field, i.e. the field's bits at their
+  //   original place in the instruction encoding.
+  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
+  //   ConditionField(instr) will return 0xC0000000.
+  // - <Name>Value() will return the field value, shifted back to bit 0.
+  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
+  //   ConditionValue(instr) will return 0xC.
+
+
   // Generally applicable fields
-  inline Condition ConditionField() const {
+  inline Condition ConditionValue() const {
     return static_cast<Condition>(Bits(31, 28));
   }
-  inline int TypeField() const { return Bits(27, 25); }
+  inline Condition ConditionField() const {
+    return static_cast<Condition>(BitField(31, 28));
+  }
+  DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
+  DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
 
-  inline int RnField() const { return Bits(19, 16); }
-  inline int RdField() const { return Bits(15, 12); }
+  inline int TypeValue() const { return Bits(27, 25); }
 
-  inline int CoprocessorField() const { return Bits(11, 8); }
+  inline int RnValue() const { return Bits(19, 16); }
+  inline int RdValue() const { return Bits(15, 12); }
+  DECLARE_STATIC_ACCESSOR(RdValue);
+
+  inline int CoprocessorValue() const { return Bits(11, 8); }
   // Support for VFP.
   // Vn(19-16) | Vd(15-12) |  Vm(3-0)
-  inline int VnField() const { return Bits(19, 16); }
-  inline int VmField() const { return Bits(3, 0); }
-  inline int VdField() const { return Bits(15, 12); }
-  inline int NField() const { return Bit(7); }
-  inline int MField() const { return Bit(5); }
-  inline int DField() const { return Bit(22); }
-  inline int RtField() const { return Bits(15, 12); }
-  inline int PField() const { return Bit(24); }
-  inline int UField() const { return Bit(23); }
-  inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); }
-  inline int Opc2Field() const { return Bits(19, 16); }
-  inline int Opc3Field() const { return Bits(7, 6); }
-  inline int SzField() const { return Bit(8); }
-  inline int VLField() const { return Bit(20); }
-  inline int VCField() const { return Bit(8); }
-  inline int VAField() const { return Bits(23, 21); }
-  inline int VBField() const { return Bits(6, 5); }
-  inline int VFPNRegCode(VFPRegPrecision pre) {
-    return VFPGlueRegCode(pre, 16, 7);
+  inline int VnValue() const { return Bits(19, 16); }
+  inline int VmValue() const { return Bits(3, 0); }
+  inline int VdValue() const { return Bits(15, 12); }
+  inline int NValue() const { return Bit(7); }
+  inline int MValue() const { return Bit(5); }
+  inline int DValue() const { return Bit(22); }
+  inline int RtValue() const { return Bits(15, 12); }
+  inline int PValue() const { return Bit(24); }
+  inline int UValue() const { return Bit(23); }
+  inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
+  inline int Opc2Value() const { return Bits(19, 16); }
+  inline int Opc3Value() const { return Bits(7, 6); }
+  inline int SzValue() const { return Bit(8); }
+  inline int VLValue() const { return Bit(20); }
+  inline int VCValue() const { return Bit(8); }
+  inline int VAValue() const { return Bits(23, 21); }
+  inline int VBValue() const { return Bits(6, 5); }
+  inline int VFPNRegValue(VFPRegPrecision pre) {
+    return VFPGlueRegValue(pre, 16, 7);
   }
-  inline int VFPMRegCode(VFPRegPrecision pre) {
-    return VFPGlueRegCode(pre, 0, 5);
+  inline int VFPMRegValue(VFPRegPrecision pre) {
+    return VFPGlueRegValue(pre, 0, 5);
   }
-  inline int VFPDRegCode(VFPRegPrecision pre) {
-    return VFPGlueRegCode(pre, 12, 22);
+  inline int VFPDRegValue(VFPRegPrecision pre) {
+    return VFPGlueRegValue(pre, 12, 22);
   }
 
   // Fields used in Data processing instructions
-  inline Opcode OpcodeField() const {
+  inline int OpcodeValue() const {
     return static_cast<Opcode>(Bits(24, 21));
   }
-  inline int SField() const { return Bit(20); }
+  inline Opcode OpcodeField() const {
+    return static_cast<Opcode>(BitField(24, 21));
+  }
+  inline int SValue() const { return Bit(20); }
     // with register
-  inline int RmField() const { return Bits(3, 0); }
-  inline Shift ShiftField() const { return static_cast<Shift>(Bits(6, 5)); }
-  inline int RegShiftField() const { return Bit(4); }
-  inline int RsField() const { return Bits(11, 8); }
-  inline int ShiftAmountField() const { return Bits(11, 7); }
+  inline int RmValue() const { return Bits(3, 0); }
+  inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
+  inline ShiftOp ShiftField() const {
+    return static_cast<ShiftOp>(BitField(6, 5));
+  }
+  inline int RegShiftValue() const { return Bit(4); }
+  inline int RsValue() const { return Bits(11, 8); }
+  inline int ShiftAmountValue() const { return Bits(11, 7); }
     // with immediate
-  inline int RotateField() const { return Bits(11, 8); }
-  inline int Immed8Field() const { return Bits(7, 0); }
-  inline int Immed4Field() const { return Bits(19, 16); }
-  inline int ImmedMovwMovtField() const {
-      return Immed4Field() << 12 | Offset12Field(); }
+  inline int RotateValue() const { return Bits(11, 8); }
+  inline int Immed8Value() const { return Bits(7, 0); }
+  inline int Immed4Value() const { return Bits(19, 16); }
+  inline int ImmedMovwMovtValue() const {
+      return Immed4Value() << 12 | Offset12Value(); }
 
   // Fields used in Load/Store instructions
-  inline int PUField() const { return Bits(24, 23); }
-  inline int  BField() const { return Bit(22); }
-  inline int  WField() const { return Bit(21); }
-  inline int  LField() const { return Bit(20); }
+  inline int PUValue() const { return Bits(24, 23); }
+  inline int PUField() const { return BitField(24, 23); }
+  inline int  BValue() const { return Bit(22); }
+  inline int  WValue() const { return Bit(21); }
+  inline int  LValue() const { return Bit(20); }
     // with register uses same fields as Data processing instructions above
     // with immediate
-  inline int Offset12Field() const { return Bits(11, 0); }
+  inline int Offset12Value() const { return Bits(11, 0); }
     // multiple
-  inline int RlistField() const { return Bits(15, 0); }
+  inline int RlistValue() const { return Bits(15, 0); }
     // extra loads and stores
-  inline int SignField() const { return Bit(6); }
-  inline int HField() const { return Bit(5); }
-  inline int ImmedHField() const { return Bits(11, 8); }
-  inline int ImmedLField() const { return Bits(3, 0); }
+  inline int SignValue() const { return Bit(6); }
+  inline int HValue() const { return Bit(5); }
+  inline int ImmedHValue() const { return Bits(11, 8); }
+  inline int ImmedLValue() const { return Bits(3, 0); }
 
   // Fields used in Branch instructions
-  inline int LinkField() const { return Bit(24); }
-  inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
+  inline int LinkValue() const { return Bit(24); }
+  inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
 
   // Fields used in Software interrupt instructions
-  inline SoftwareInterruptCodes SvcField() const {
+  inline SoftwareInterruptCodes SvcValue() const {
     return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
   }
 
@@ -354,42 +667,45 @@
 
   // Test for a stop instruction.
   inline bool IsStop() const {
-    return (TypeField() == 7) && (Bit(24) == 1) && (SvcField() >= stop);
+    return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
   }
 
   // Special accessors that test for existence of a value.
-  inline bool HasS()    const { return SField() == 1; }
-  inline bool HasB()    const { return BField() == 1; }
-  inline bool HasW()    const { return WField() == 1; }
-  inline bool HasL()    const { return LField() == 1; }
-  inline bool HasU()    const { return UField() == 1; }
-  inline bool HasSign() const { return SignField() == 1; }
-  inline bool HasH()    const { return HField() == 1; }
-  inline bool HasLink() const { return LinkField() == 1; }
+  inline bool HasS()    const { return SValue() == 1; }
+  inline bool HasB()    const { return BValue() == 1; }
+  inline bool HasW()    const { return WValue() == 1; }
+  inline bool HasL()    const { return LValue() == 1; }
+  inline bool HasU()    const { return UValue() == 1; }
+  inline bool HasSign() const { return SignValue() == 1; }
+  inline bool HasH()    const { return HValue() == 1; }
+  inline bool HasLink() const { return LinkValue() == 1; }
 
   // Decoding the double immediate in the vmov instruction.
   double DoubleImmedVmov() const;
 
   // Instructions are read of out a code stream. The only way to get a
   // reference to an instruction is to convert a pointer. There is no way
-  // to allocate or create instances of class Instr.
-  // Use the At(pc) function to create references to Instr.
-  static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
+  // to allocate or create instances of class Instruction.
+  // Use the At(pc) function to create references to Instruction.
+  static Instruction* At(byte* pc) {
+    return reinterpret_cast<Instruction*>(pc);
+  }
+
 
  private:
   // Join split register codes, depending on single or double precision.
   // four_bit is the position of the least-significant bit of the four
   // bit specifier. one_bit is the position of the additional single bit
   // specifier.
-  inline int VFPGlueRegCode(VFPRegPrecision pre, int four_bit, int one_bit) {
+  inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
     if (pre == kSinglePrecision) {
       return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
     }
     return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
   }
 
-  // We need to prevent the creation of instances of class Instr.
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+  // We need to prevent the creation of instances of class Instruction.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
 };
 
 
@@ -428,6 +744,6 @@
 };
 
 
-} }  // namespace assembler::arm
+} }  // namespace v8::internal
 
 #endif  // V8_ARM_CONSTANTS_ARM_H_
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index b359dce..507954d 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -56,7 +56,7 @@
   // that the Icache was flushed.
   // None of this code ends up in the snapshot so there are no issues
   // around whether or not to generate the code when building snapshots.
-  assembler::arm::Simulator::FlushICache(start, size);
+  Simulator::FlushICache(start, size);
 #else
   // Ideally, we would call
   //   syscall(__ARM_NR_cacheflush, start,
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 297a2db..4e77ef3 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -64,10 +64,8 @@
 #include "platform.h"
 
 
-namespace assembler {
-namespace arm {
-
-namespace v8i = v8::internal;
+namespace v8 {
+namespace internal {
 
 
 //------------------------------------------------------------------------------
@@ -78,7 +76,7 @@
 class Decoder {
  public:
   Decoder(const disasm::NameConverter& converter,
-          v8::internal::Vector<char> out_buffer)
+          Vector<char> out_buffer)
     : converter_(converter),
       out_buffer_(out_buffer),
       out_buffer_pos_(0) {
@@ -100,45 +98,45 @@
   void PrintRegister(int reg);
   void PrintSRegister(int reg);
   void PrintDRegister(int reg);
-  int FormatVFPRegister(Instr* instr, const char* format);
-  void PrintMovwMovt(Instr* instr);
-  int FormatVFPinstruction(Instr* instr, const char* format);
-  void PrintCondition(Instr* instr);
-  void PrintShiftRm(Instr* instr);
-  void PrintShiftImm(Instr* instr);
-  void PrintShiftSat(Instr* instr);
-  void PrintPU(Instr* instr);
+  int FormatVFPRegister(Instruction* instr, const char* format);
+  void PrintMovwMovt(Instruction* instr);
+  int FormatVFPinstruction(Instruction* instr, const char* format);
+  void PrintCondition(Instruction* instr);
+  void PrintShiftRm(Instruction* instr);
+  void PrintShiftImm(Instruction* instr);
+  void PrintShiftSat(Instruction* instr);
+  void PrintPU(Instruction* instr);
   void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
 
   // Handle formatting of instructions and their options.
-  int FormatRegister(Instr* instr, const char* option);
-  int FormatOption(Instr* instr, const char* option);
-  void Format(Instr* instr, const char* format);
-  void Unknown(Instr* instr);
+  int FormatRegister(Instruction* instr, const char* option);
+  int FormatOption(Instruction* instr, const char* option);
+  void Format(Instruction* instr, const char* format);
+  void Unknown(Instruction* instr);
 
   // Each of these functions decodes one particular instruction type, a 3-bit
   // field in the instruction encoding.
   // Types 0 and 1 are combined as they are largely the same except for the way
   // they interpret the shifter operand.
-  void DecodeType01(Instr* instr);
-  void DecodeType2(Instr* instr);
-  void DecodeType3(Instr* instr);
-  void DecodeType4(Instr* instr);
-  void DecodeType5(Instr* instr);
-  void DecodeType6(Instr* instr);
+  void DecodeType01(Instruction* instr);
+  void DecodeType2(Instruction* instr);
+  void DecodeType3(Instruction* instr);
+  void DecodeType4(Instruction* instr);
+  void DecodeType5(Instruction* instr);
+  void DecodeType6(Instruction* instr);
   // Type 7 includes special Debugger instructions.
-  int DecodeType7(Instr* instr);
+  int DecodeType7(Instruction* instr);
   // For VFP support.
-  void DecodeTypeVFP(Instr* instr);
-  void DecodeType6CoprocessorIns(Instr* instr);
+  void DecodeTypeVFP(Instruction* instr);
+  void DecodeType6CoprocessorIns(Instruction* instr);
 
-  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
-  void DecodeVCMP(Instr* instr);
-  void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
-  void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
+  void DecodeVCMP(Instruction* instr);
+  void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+  void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
 
   const disasm::NameConverter& converter_;
-  v8::internal::Vector<char> out_buffer_;
+  Vector<char> out_buffer_;
   int out_buffer_pos_;
 
   DISALLOW_COPY_AND_ASSIGN(Decoder);
@@ -169,15 +167,15 @@
 
 // These condition names are defined in a way to match the native disassembler
 // formatting. See for example the command "objdump -d <binary file>".
-static const char* cond_names[max_condition] = {
+static const char* cond_names[kNumberOfConditions] = {
   "eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
   "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
 };
 
 
 // Print the condition guarding the instruction.
-void Decoder::PrintCondition(Instr* instr) {
-  Print(cond_names[instr->ConditionField()]);
+void Decoder::PrintCondition(Instruction* instr) {
+  Print(cond_names[instr->ConditionValue()]);
 }
 
 
@@ -188,36 +186,37 @@
 
 // Print the VFP S register name according to the active name converter.
 void Decoder::PrintSRegister(int reg) {
-  Print(assembler::arm::VFPRegisters::Name(reg, false));
+  Print(VFPRegisters::Name(reg, false));
 }
 
 // Print the  VFP D register name according to the active name converter.
 void Decoder::PrintDRegister(int reg) {
-  Print(assembler::arm::VFPRegisters::Name(reg, true));
+  Print(VFPRegisters::Name(reg, true));
 }
 
 
 // These shift names are defined in a way to match the native disassembler
 // formatting. See for example the command "objdump -d <binary file>".
-static const char* shift_names[max_shift] = {
+static const char* shift_names[kNumberOfShifts] = {
   "lsl", "lsr", "asr", "ror"
 };
 
 
 // Print the register shift operands for the instruction. Generally used for
 // data processing instructions.
-void Decoder::PrintShiftRm(Instr* instr) {
-  Shift shift = instr->ShiftField();
-  int shift_amount = instr->ShiftAmountField();
-  int rm = instr->RmField();
+void Decoder::PrintShiftRm(Instruction* instr) {
+  ShiftOp shift = instr->ShiftField();
+  int shift_index = instr->ShiftValue();
+  int shift_amount = instr->ShiftAmountValue();
+  int rm = instr->RmValue();
 
   PrintRegister(rm);
 
-  if ((instr->RegShiftField() == 0) && (shift == LSL) && (shift_amount == 0)) {
+  if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
     // Special case for using rm only.
     return;
   }
-  if (instr->RegShiftField() == 0) {
+  if (instr->RegShiftValue() == 0) {
     // by immediate
     if ((shift == ROR) && (shift_amount == 0)) {
       Print(", RRX");
@@ -225,14 +224,15 @@
     } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
       shift_amount = 32;
     }
-    out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                         ", %s #%d",
-                                         shift_names[shift], shift_amount);
+    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    ", %s #%d",
+                                    shift_names[shift_index],
+                                    shift_amount);
   } else {
     // by register
-    int rs = instr->RsField();
-    out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                         ", %s ", shift_names[shift]);
+    int rs = instr->RsValue();
+    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    ", %s ", shift_names[shift_index]);
     PrintRegister(rs);
   }
 }
@@ -240,43 +240,43 @@
 
 // Print the immediate operand for the instruction. Generally used for data
 // processing instructions.
-void Decoder::PrintShiftImm(Instr* instr) {
-  int rotate = instr->RotateField() * 2;
-  int immed8 = instr->Immed8Field();
+void Decoder::PrintShiftImm(Instruction* instr) {
+  int rotate = instr->RotateValue() * 2;
+  int immed8 = instr->Immed8Value();
   int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                       "#%d", imm);
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                  "#%d", imm);
 }
 
 
 // Print the optional shift and immediate used by saturating instructions.
-void Decoder::PrintShiftSat(Instr* instr) {
+void Decoder::PrintShiftSat(Instruction* instr) {
   int shift = instr->Bits(11, 7);
   if (shift > 0) {
-    out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                         ", %s #%d",
-                                         shift_names[instr->Bit(6) * 2],
-                                         instr->Bits(11, 7));
+    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    ", %s #%d",
+                                    shift_names[instr->Bit(6) * 2],
+                                    instr->Bits(11, 7));
   }
 }
 
 
 // Print PU formatting to reduce complexity of FormatOption.
-void Decoder::PrintPU(Instr* instr) {
+void Decoder::PrintPU(Instruction* instr) {
   switch (instr->PUField()) {
-    case 0: {
+    case da_x: {
       Print("da");
       break;
     }
-    case 1: {
+    case ia_x: {
       Print("ia");
       break;
     }
-    case 2: {
+    case db_x: {
       Print("db");
       break;
     }
-    case 3: {
+    case ib_x: {
       Print("ib");
       break;
     }
@@ -292,22 +292,22 @@
 // the FormatOption method.
 void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
   switch (svc) {
-    case call_rt_redirected:
-      Print("call_rt_redirected");
+    case kCallRtRedirected:
+      Print("call rt redirected");
       return;
-    case break_point:
-      Print("break_point");
+    case kBreakpoint:
+      Print("breakpoint");
       return;
     default:
-      if (svc >= stop) {
-        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                             "%d - 0x%x",
-                                             svc & kStopCodeMask,
-                                             svc & kStopCodeMask);
+      if (svc >= kStopCode) {
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "%d - 0x%x",
+                                        svc & kStopCodeMask,
+                                        svc & kStopCodeMask);
       } else {
-        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                             "%d",
-                                             svc);
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "%d",
+                                        svc);
       }
       return;
   }
@@ -316,32 +316,32 @@
 
 // Handle all register based formatting in this function to reduce the
 // complexity of FormatOption.
-int Decoder::FormatRegister(Instr* instr, const char* format) {
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
   ASSERT(format[0] == 'r');
   if (format[1] == 'n') {  // 'rn: Rn register
-    int reg = instr->RnField();
+    int reg = instr->RnValue();
     PrintRegister(reg);
     return 2;
   } else if (format[1] == 'd') {  // 'rd: Rd register
-    int reg = instr->RdField();
+    int reg = instr->RdValue();
     PrintRegister(reg);
     return 2;
   } else if (format[1] == 's') {  // 'rs: Rs register
-    int reg = instr->RsField();
+    int reg = instr->RsValue();
     PrintRegister(reg);
     return 2;
   } else if (format[1] == 'm') {  // 'rm: Rm register
-    int reg = instr->RmField();
+    int reg = instr->RmValue();
     PrintRegister(reg);
     return 2;
   } else if (format[1] == 't') {  // 'rt: Rt register
-    int reg = instr->RtField();
+    int reg = instr->RtValue();
     PrintRegister(reg);
     return 2;
   } else if (format[1] == 'l') {
     // 'rlist: register list for load and store multiple instructions
     ASSERT(STRING_STARTS_WITH(format, "rlist"));
-    int rlist = instr->RlistField();
+    int rlist = instr->RlistValue();
     int reg = 0;
     Print("{");
     // Print register list in ascending order, by scanning the bit mask.
@@ -365,22 +365,22 @@
 
 // Handle all VFP register based formatting in this function to reduce the
 // complexity of FormatOption.
-int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
+int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
   ASSERT((format[0] == 'S') || (format[0] == 'D'));
 
   if (format[1] == 'n') {
-    int reg = instr->VnField();
-    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField()));
+    int reg = instr->VnValue();
+    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
     if (format[0] == 'D') PrintDRegister(reg);
     return 2;
   } else if (format[1] == 'm') {
-    int reg = instr->VmField();
-    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField()));
+    int reg = instr->VmValue();
+    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
     if (format[0] == 'D') PrintDRegister(reg);
     return 2;
   } else if (format[1] == 'd') {
-    int reg = instr->VdField();
-    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField()));
+    int reg = instr->VdValue();
+    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
     if (format[0] == 'D') PrintDRegister(reg);
     return 2;
   }
@@ -390,19 +390,19 @@
 }
 
 
-int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
+int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
     Print(format);
     return 0;
 }
 
 
 // Print the movw or movt instruction.
-void Decoder::PrintMovwMovt(Instr* instr) {
-  int imm = instr->ImmedMovwMovtField();
-  int rd = instr->RdField();
+void Decoder::PrintMovwMovt(Instruction* instr) {
+  int imm = instr->ImmedMovwMovtValue();
+  int rd = instr->RdValue();
   PrintRegister(rd);
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                       ", #%d", imm);
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                  ", #%d", imm);
 }
 
 
@@ -411,7 +411,7 @@
 // character of the option string (the option escape has already been
 // consumed by the caller.)  FormatOption returns the number of
 // characters that were consumed from the formatting string.
-int Decoder::FormatOption(Instr* instr, const char* format) {
+int Decoder::FormatOption(Instruction* instr, const char* format) {
   switch (format[0]) {
     case 'a': {  // 'a: accumulate multiplies
       if (instr->Bit(21) == 0) {
@@ -434,8 +434,8 @@
     }
     case 'd': {  // 'd: vmov double immediate.
       double d = instr->DoubleImmedVmov();
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                           "#%g", d);
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "#%g", d);
       return 1;
     }
     case 'f': {  // 'f: bitfield instructions - v7 and above.
@@ -448,8 +448,8 @@
         ASSERT(width > 0);
       }
       ASSERT((width + lsbit) <= 32);
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                           "#%d, #%d", lsbit, width);
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "#%d, #%d", lsbit, width);
       return 1;
     }
     case 'h': {  // 'h: halfword operation for extra loads and stores
@@ -469,9 +469,9 @@
       ASSERT((lsb >= 0) && (lsb <= 31));
       ASSERT((width + lsb) <= 32);
 
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                           "%d",
-                                           instr->Bits(width + lsb - 1, lsb));
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "%d",
+                                      instr->Bits(width + lsb - 1, lsb));
       return 8;
     }
     case 'l': {  // 'l: branch and link
@@ -505,31 +505,31 @@
       ASSERT(STRING_STARTS_WITH(format, "msg"));
       byte* str =
           reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                           "%s", converter_.NameInCode(str));
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "%s", converter_.NameInCode(str));
       return 3;
     }
     case 'o': {
       if ((format[3] == '1') && (format[4] == '2')) {
         // 'off12: 12-bit offset for load and store instructions
         ASSERT(STRING_STARTS_WITH(format, "off12"));
-        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                             "%d", instr->Offset12Field());
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "%d", instr->Offset12Value());
         return 5;
       } else if (format[3] == '0') {
         // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
         ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
-        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                            "%d",
-                                            (instr->Bits(19, 8) << 4) +
-                                                instr->Bits(3, 0));
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "%d",
+                                        (instr->Bits(19, 8) << 4) +
+                                        instr->Bits(3, 0));
         return 15;
       }
       // 'off8: 8-bit offset for extra load and store instructions
       ASSERT(STRING_STARTS_WITH(format, "off8"));
-      int offs8 = (instr->ImmedHField() << 4) | instr->ImmedLField();
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                           "%d", offs8);
+      int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "%d", offs8);
       return 4;
     }
     case 'p': {  // 'pu: P and U bits for load and store instructions
@@ -544,10 +544,10 @@
       if (format[1] == 'h') {  // 'shift_op or 'shift_rm or 'shift_sat.
         if (format[6] == 'o') {  // 'shift_op
           ASSERT(STRING_STARTS_WITH(format, "shift_op"));
-          if (instr->TypeField() == 0) {
+          if (instr->TypeValue() == 0) {
             PrintShiftRm(instr);
           } else {
-            ASSERT(instr->TypeField() == 1);
+            ASSERT(instr->TypeValue() == 1);
             PrintShiftImm(instr);
           }
           return 8;
@@ -562,7 +562,7 @@
         }
       } else if (format[1] == 'v') {  // 'svc
         ASSERT(STRING_STARTS_WITH(format, "svc"));
-        PrintSoftwareInterrupt(instr->SvcField());
+        PrintSoftwareInterrupt(instr->SvcValue());
         return 3;
       } else if (format[1] == 'i') {  // 'sign: signed extra loads and stores
         ASSERT(STRING_STARTS_WITH(format, "sign"));
@@ -579,12 +579,12 @@
     }
     case 't': {  // 'target: target of branch instructions
       ASSERT(STRING_STARTS_WITH(format, "target"));
-      int off = (instr->SImmed24Field() << 2) + 8;
-      out_buffer_pos_ += v8i::OS::SNPrintF(
-          out_buffer_ + out_buffer_pos_,
-          "%+d -> %s",
-          off,
-          converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+      int off = (instr->SImmed24Value() << 2) + 8;
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "%+d -> %s",
+                                      off,
+                                      converter_.NameOfAddress(
+                                        reinterpret_cast<byte*>(instr) + off));
       return 6;
     }
     case 'u': {  // 'u: signed or unsigned multiplies
@@ -633,7 +633,7 @@
 // Format takes a formatting string for a whole instruction and prints it into
 // the output buffer. All escaped options are handed to FormatOption to be
 // parsed further.
-void Decoder::Format(Instr* instr, const char* format) {
+void Decoder::Format(Instruction* instr, const char* format) {
   char cur = *format++;
   while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
     if (cur == '\'') {  // Single quote is used as the formatting escape.
@@ -649,13 +649,13 @@
 
 // For currently unimplemented decodings the disassembler calls Unknown(instr)
 // which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instr* instr) {
+void Decoder::Unknown(Instruction* instr) {
   Format(instr, "unknown");
 }
 
 
-void Decoder::DecodeType01(Instr* instr) {
-  int type = instr->TypeField();
+void Decoder::DecodeType01(Instruction* instr) {
+  int type = instr->TypeValue();
   if ((type == 0) && instr->IsSpecialType0()) {
     // multiply instruction or extra loads and stores
     if (instr->Bits(7, 4) == 9) {
@@ -689,7 +689,7 @@
     } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
       // ldrd, strd
       switch (instr->PUField()) {
-        case 0: {
+        case da_x: {
           if (instr->Bit(22) == 0) {
             Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
           } else {
@@ -697,7 +697,7 @@
           }
           break;
         }
-        case 1: {
+        case ia_x: {
           if (instr->Bit(22) == 0) {
             Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
           } else {
@@ -705,7 +705,7 @@
           }
           break;
         }
-        case 2: {
+        case db_x: {
           if (instr->Bit(22) == 0) {
             Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
           } else {
@@ -713,7 +713,7 @@
           }
           break;
         }
-        case 3: {
+        case ib_x: {
           if (instr->Bit(22) == 0) {
             Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
           } else {
@@ -730,7 +730,7 @@
     } else {
       // extra load/store instructions
       switch (instr->PUField()) {
-        case 0: {
+        case da_x: {
           if (instr->Bit(22) == 0) {
             Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
           } else {
@@ -738,7 +738,7 @@
           }
           break;
         }
-        case 1: {
+        case ia_x: {
           if (instr->Bit(22) == 0) {
             Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
           } else {
@@ -746,7 +746,7 @@
           }
           break;
         }
-        case 2: {
+        case db_x: {
           if (instr->Bit(22) == 0) {
             Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
           } else {
@@ -754,7 +754,7 @@
           }
           break;
         }
-        case 3: {
+        case ib_x: {
           if (instr->Bit(22) == 0) {
             Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
           } else {
@@ -772,7 +772,7 @@
     }
   } else if ((type == 0) && instr->IsMiscType0()) {
     if (instr->Bits(22, 21) == 1) {
-      switch (instr->Bits(7, 4)) {
+      switch (instr->BitField(7, 4)) {
         case BX:
           Format(instr, "bx'cond 'rm");
           break;
@@ -787,7 +787,7 @@
           break;
       }
     } else if (instr->Bits(22, 21) == 3) {
-      switch (instr->Bits(7, 4)) {
+      switch (instr->BitField(7, 4)) {
         case CLZ:
           Format(instr, "clz'cond 'rd, 'rm");
           break;
@@ -894,27 +894,27 @@
 }
 
 
-void Decoder::DecodeType2(Instr* instr) {
+void Decoder::DecodeType2(Instruction* instr) {
   switch (instr->PUField()) {
-    case 0: {
+    case da_x: {
       if (instr->HasW()) {
         Unknown(instr);  // not used in V8
       }
       Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
       break;
     }
-    case 1: {
+    case ia_x: {
       if (instr->HasW()) {
         Unknown(instr);  // not used in V8
       }
       Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
       break;
     }
-    case 2: {
+    case db_x: {
       Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
       break;
     }
-    case 3: {
+    case ib_x: {
       Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
       break;
     }
@@ -927,14 +927,14 @@
 }
 
 
-void Decoder::DecodeType3(Instr* instr) {
+void Decoder::DecodeType3(Instruction* instr) {
   switch (instr->PUField()) {
-    case 0: {
+    case da_x: {
       ASSERT(!instr->HasW());
       Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
       break;
     }
-    case 1: {
+    case ia_x: {
       if (instr->HasW()) {
         ASSERT(instr->Bits(5, 4) == 0x1);
         if (instr->Bit(22) == 0x1) {
@@ -947,11 +947,11 @@
       }
       break;
     }
-    case 2: {
+    case db_x: {
       Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
       break;
     }
-    case 3: {
+    case ib_x: {
       if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
         uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
         uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
@@ -969,7 +969,7 @@
         uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
         uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
         if (msbit >= lsbit) {
-          if (instr->RmField() == 15) {
+          if (instr->RmValue() == 15) {
             Format(instr, "bfc'cond 'rd, 'f");
           } else {
             Format(instr, "bfi'cond 'rd, 'rm, 'f");
@@ -991,7 +991,7 @@
 }
 
 
-void Decoder::DecodeType4(Instr* instr) {
+void Decoder::DecodeType4(Instruction* instr) {
   ASSERT(instr->Bit(22) == 0);  // Privileged mode currently not supported.
   if (instr->HasL()) {
     Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
@@ -1001,41 +1001,43 @@
 }
 
 
-void Decoder::DecodeType5(Instr* instr) {
+void Decoder::DecodeType5(Instruction* instr) {
   Format(instr, "b'l'cond 'target");
 }
 
 
-void Decoder::DecodeType6(Instr* instr) {
+void Decoder::DecodeType6(Instruction* instr) {
   DecodeType6CoprocessorIns(instr);
 }
 
 
-int Decoder::DecodeType7(Instr* instr) {
+int Decoder::DecodeType7(Instruction* instr) {
   if (instr->Bit(24) == 1) {
-    if (instr->SvcField() >= stop) {
+    if (instr->SvcValue() >= kStopCode) {
       Format(instr, "stop'cond 'svc");
       // Also print the stop message. Its address is encoded
       // in the following 4 bytes.
-      out_buffer_pos_ +=
-        v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                          "\n  %p  %08x       stop message: %s",
-                          reinterpret_cast<int32_t*>(instr + Instr::kInstrSize),
-                          *reinterpret_cast<char**>(instr + Instr::kInstrSize),
-                          *reinterpret_cast<char**>(instr + Instr::kInstrSize));
-      // We have decoded 2 * Instr::kInstrSize bytes.
-      return 2 * Instr::kInstrSize;
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "\n  %p  %08x       stop message: %s",
+                                      reinterpret_cast<int32_t*>(instr
+                                                     + Instruction::kInstrSize),
+                                      *reinterpret_cast<char**>(instr
+                                                    + Instruction::kInstrSize),
+                                      *reinterpret_cast<char**>(instr
+                                                    + Instruction::kInstrSize));
+      // We have decoded 2 * Instruction::kInstrSize bytes.
+      return 2 * Instruction::kInstrSize;
     } else {
       Format(instr, "svc'cond 'svc");
     }
   } else {
     DecodeTypeVFP(instr);
   }
-  return Instr::kInstrSize;
+  return Instruction::kInstrSize;
 }
 
 
-// void Decoder::DecodeTypeVFP(Instr* instr)
+// void Decoder::DecodeTypeVFP(Instruction* instr)
 // vmov: Sn = Rt
 // vmov: Rt = Sn
 // vcvt: Dd = Sm
@@ -1048,34 +1050,34 @@
 // vmrs
 // vmsr
 // Dd = vsqrt(Dm)
-void Decoder::DecodeTypeVFP(Instr* instr) {
-  ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+void Decoder::DecodeTypeVFP(Instruction* instr) {
+  ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
   ASSERT(instr->Bits(11, 9) == 0x5);
 
   if (instr->Bit(4) == 0) {
-    if (instr->Opc1Field() == 0x7) {
+    if (instr->Opc1Value() == 0x7) {
       // Other data processing instructions
-      if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+      if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
         // vmov register to register.
-        if (instr->SzField() == 0x1) {
+        if (instr->SzValue() == 0x1) {
           Format(instr, "vmov.f64'cond 'Dd, 'Dm");
         } else {
           Format(instr, "vmov.f32'cond 'Sd, 'Sm");
         }
-      } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+      } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
         DecodeVCVTBetweenDoubleAndSingle(instr);
-      } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+      } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
         DecodeVCVTBetweenFloatingPointAndInteger(instr);
-      } else if (((instr->Opc2Field() >> 1) == 0x6) &&
-                 (instr->Opc3Field() & 0x1)) {
+      } else if (((instr->Opc2Value() >> 1) == 0x6) &&
+                 (instr->Opc3Value() & 0x1)) {
         DecodeVCVTBetweenFloatingPointAndInteger(instr);
-      } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
-                 (instr->Opc3Field() & 0x1)) {
+      } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+                 (instr->Opc3Value() & 0x1)) {
         DecodeVCMP(instr);
-      } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
+      } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
         Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
-      } else if (instr->Opc3Field() == 0x0) {
-        if (instr->SzField() == 0x1) {
+      } else if (instr->Opc3Value() == 0x0) {
+        if (instr->SzValue() == 0x1) {
           Format(instr, "vmov.f64'cond 'Dd, 'd");
         } else {
           Unknown(instr);  // Not used by V8.
@@ -1083,9 +1085,9 @@
       } else {
         Unknown(instr);  // Not used by V8.
       }
-    } else if (instr->Opc1Field() == 0x3) {
-      if (instr->SzField() == 0x1) {
-        if (instr->Opc3Field() & 0x1) {
+    } else if (instr->Opc1Value() == 0x3) {
+      if (instr->SzValue() == 0x1) {
+        if (instr->Opc3Value() & 0x1) {
           Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
         } else {
           Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
@@ -1093,14 +1095,14 @@
       } else {
         Unknown(instr);  // Not used by V8.
       }
-    } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
-      if (instr->SzField() == 0x1) {
+    } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
+      if (instr->SzValue() == 0x1) {
         Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
       } else {
         Unknown(instr);  // Not used by V8.
       }
-    } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
-      if (instr->SzField() == 0x1) {
+    } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
+      if (instr->SzValue() == 0x1) {
         Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
       } else {
         Unknown(instr);  // Not used by V8.
@@ -1109,13 +1111,13 @@
       Unknown(instr);  // Not used by V8.
     }
   } else {
-    if ((instr->VCField() == 0x0) &&
-        (instr->VAField() == 0x0)) {
+    if ((instr->VCValue() == 0x0) &&
+        (instr->VAValue() == 0x0)) {
       DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
-    } else if ((instr->VCField() == 0x0) &&
-               (instr->VAField() == 0x7) &&
+    } else if ((instr->VCValue() == 0x0) &&
+               (instr->VAValue() == 0x7) &&
                (instr->Bits(19, 16) == 0x1)) {
-      if (instr->VLField() == 0) {
+      if (instr->VLValue() == 0) {
         if (instr->Bits(15, 12) == 0xF) {
           Format(instr, "vmsr'cond FPSCR, APSR");
         } else {
@@ -1133,11 +1135,12 @@
 }
 
 
-void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
-  ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
-         (instr->VAField() == 0x0));
+void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
+    Instruction* instr) {
+  ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+         (instr->VAValue() == 0x0));
 
-  bool to_arm_register = (instr->VLField() == 0x1);
+  bool to_arm_register = (instr->VLValue() == 0x1);
 
   if (to_arm_register) {
     Format(instr, "vmov'cond 'rt, 'Sn");
@@ -1147,19 +1150,19 @@
 }
 
 
-void Decoder::DecodeVCMP(Instr* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
-  ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
-         (instr->Opc3Field() & 0x1));
+void Decoder::DecodeVCMP(Instruction* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+         (instr->Opc3Value() & 0x1));
 
   // Comparison.
-  bool dp_operation = (instr->SzField() == 1);
+  bool dp_operation = (instr->SzValue() == 1);
   bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
 
   if (dp_operation && !raise_exception_for_qnan) {
-    if (instr->Opc2Field() == 0x4) {
+    if (instr->Opc2Value() == 0x4) {
       Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
-    } else if (instr->Opc2Field() == 0x5) {
+    } else if (instr->Opc2Value() == 0x5) {
       Format(instr, "vcmp.f64'cond 'Dd, #0.0");
     } else {
       Unknown(instr);  // invalid
@@ -1170,11 +1173,11 @@
 }
 
 
-void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
-  ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
 
-  bool double_to_single = (instr->SzField() == 1);
+  bool double_to_single = (instr->SzValue() == 1);
 
   if (double_to_single) {
     Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
@@ -1184,13 +1187,13 @@
 }
 
 
-void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
-  ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
-         (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+         (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
 
   bool to_integer = (instr->Bit(18) == 1);
-  bool dp_operation = (instr->SzField() == 1);
+  bool dp_operation = (instr->SzValue() == 1);
   if (to_integer) {
     bool unsigned_integer = (instr->Bit(16) == 0);
 
@@ -1232,11 +1235,11 @@
 // <Rt, Rt2> = vmov(Dm)
 // Ddst = MEM(Rbase + 4*offset).
 // MEM(Rbase + 4*offset) = Dsrc.
-void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
-  ASSERT((instr->TypeField() == 6));
+void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
+  ASSERT(instr->TypeValue() == 6);
 
-  if (instr->CoprocessorField() == 0xA) {
-    switch (instr->OpcodeField()) {
+  if (instr->CoprocessorValue() == 0xA) {
+    switch (instr->OpcodeValue()) {
       case 0x8:
       case 0xA:
         if (instr->HasL()) {
@@ -1257,8 +1260,8 @@
         Unknown(instr);  // Not used by V8.
         break;
     }
-  } else if (instr->CoprocessorField() == 0xB) {
-    switch (instr->OpcodeField()) {
+  } else if (instr->CoprocessorValue() == 0xB) {
+    switch (instr->OpcodeValue()) {
       case 0x2:
         // Load and store double to two GP registers
         if (instr->Bits(7, 4) != 0x1) {
@@ -1295,16 +1298,16 @@
 
 // Disassemble the instruction at *instr_ptr into the output buffer.
 int Decoder::InstructionDecode(byte* instr_ptr) {
-  Instr* instr = Instr::At(instr_ptr);
+  Instruction* instr = Instruction::At(instr_ptr);
   // Print raw instruction bytes.
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                       "%08x       ",
-                                       instr->InstructionBits());
-  if (instr->ConditionField() == special_condition) {
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                  "%08x       ",
+                                  instr->InstructionBits());
+  if (instr->ConditionField() == kSpecialCondition) {
     UNIMPLEMENTED();
-    return Instr::kInstrSize;
+    return Instruction::kInstrSize;
   }
-  switch (instr->TypeField()) {
+  switch (instr->TypeValue()) {
     case 0:
     case 1: {
       DecodeType01(instr);
@@ -1339,11 +1342,11 @@
       break;
     }
   }
-  return Instr::kInstrSize;
+  return Instruction::kInstrSize;
 }
 
 
-} }  // namespace assembler::arm
+} }  // namespace v8::internal
 
 
 
@@ -1351,8 +1354,6 @@
 
 namespace disasm {
 
-namespace v8i = v8::internal;
-
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
   static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
@@ -1367,7 +1368,7 @@
 
 
 const char* NameConverter::NameOfCPURegister(int reg) const {
-  return assembler::arm::Registers::Name(reg);
+  return v8::internal::Registers::Name(reg);
 }
 
 
@@ -1401,7 +1402,7 @@
 
 int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
                                     byte* instruction) {
-  assembler::arm::Decoder d(converter_, buffer);
+  v8::internal::Decoder d(converter_, buffer);
   return d.InstructionDecode(instruction);
 }
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index b7a1cc1..66de8e9 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -517,16 +517,16 @@
 }
 
 
-void FullCodeGenerator::Split(Condition cc,
+void FullCodeGenerator::Split(Condition cond,
                               Label* if_true,
                               Label* if_false,
                               Label* fall_through) {
   if (if_false == fall_through) {
-    __ b(cc, if_true);
+    __ b(cond, if_true);
   } else if (if_true == fall_through) {
-    __ b(NegateCondition(cc), if_false);
+    __ b(NegateCondition(cond), if_false);
   } else {
-    __ b(cc, if_true);
+    __ b(cond, if_true);
     __ b(if_false);
   }
 }
@@ -819,7 +819,7 @@
 
   // Convert the object to a JS object.
   Label convert, done_convert;
-  __ BranchOnSmi(r0, &convert);
+  __ JumpIfSmi(r0, &convert);
   __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
   __ b(hs, &done_convert);
   __ bind(&convert);
@@ -1550,8 +1550,13 @@
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      OverwriteMode mode) {
   __ pop(r1);
-  GenericBinaryOpStub stub(op, mode, r1, r0);
-  __ CallStub(&stub);
+  if (op == Token::ADD || op == Token::SUB || op == Token::MUL) {
+    TypeRecordingBinaryOpStub stub(op, mode);
+    __ CallStub(&stub);
+  } else {
+    GenericBinaryOpStub stub(op, mode, r1, r0);
+    __ CallStub(&stub);
+  }
   context()->Plug(r0);
 }
 
@@ -2132,7 +2137,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ LoadRoot(ip, Heap::kNullValueRootIndex);
   __ cmp(r0, ip);
   __ b(eq, if_true);
@@ -2164,7 +2169,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ge, if_true, if_false, fall_through);
@@ -2185,7 +2190,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
@@ -2231,7 +2236,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
@@ -2252,7 +2257,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
@@ -2273,7 +2278,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
@@ -2380,7 +2385,7 @@
   VisitForAccumulatorValue(args->at(0));
 
   // If the object is a smi, we return null.
-  __ BranchOnSmi(r0, &null);
+  __ JumpIfSmi(r0, &null);
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
@@ -2531,7 +2536,7 @@
 
   Label done;
   // If the object is a smi return the object.
-  __ BranchOnSmi(r0, &done);
+  __ JumpIfSmi(r0, &done);
   // If the object is not a value type, return the object.
   __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
   __ b(ne, &done);
@@ -2561,7 +2566,7 @@
 
   Label done;
   // If the object is a smi, return the value.
-  __ BranchOnSmi(r1, &done);
+  __ JumpIfSmi(r1, &done);
 
   // If the object is not a value type, return the value.
   __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
@@ -3084,7 +3089,7 @@
       bool inline_smi_code = ShouldInlineSmiCase(expr->op());
       if (inline_smi_code) {
         Label call_stub;
-        __ BranchOnNotSmi(r0, &call_stub);
+        __ JumpIfNotSmi(r0, &call_stub);
         __ mvn(r0, Operand(r0));
         // Bit-clear inverted smi-tag.
         __ bic(r0, r0, Operand(kSmiTagMask));
@@ -3171,7 +3176,7 @@
 
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
-  __ BranchOnSmi(r0, &no_conversion);
+  __ JumpIfSmi(r0, &no_conversion);
   __ push(r0);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
   __ bind(&no_conversion);
@@ -3205,7 +3210,7 @@
     __ b(vs, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    __ BranchOnSmi(r0, &done);
+    __ JumpIfSmi(r0, &done);
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -3458,34 +3463,34 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
-      Condition cc = eq;
+      Condition cond = eq;
       bool strict = false;
       switch (op) {
         case Token::EQ_STRICT:
           strict = true;
           // Fall through
         case Token::EQ:
-          cc = eq;
+          cond = eq;
           __ pop(r1);
           break;
         case Token::LT:
-          cc = lt;
+          cond = lt;
           __ pop(r1);
           break;
         case Token::GT:
           // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = lt;
+          cond = lt;
           __ mov(r1, result_register());
           __ pop(r0);
          break;
         case Token::LTE:
           // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = ge;
+          cond = ge;
           __ mov(r1, result_register());
           __ pop(r0);
           break;
         case Token::GTE:
-          cc = ge;
+          cond = ge;
           __ pop(r1);
           break;
         case Token::IN:
@@ -3498,19 +3503,19 @@
       if (inline_smi_code) {
         Label slow_case;
         __ orr(r2, r0, Operand(r1));
-        __ BranchOnNotSmi(r2, &slow_case);
+        __ JumpIfNotSmi(r2, &slow_case);
         __ cmp(r1, r0);
-        Split(cc, if_true, if_false, NULL);
+        Split(cond, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
       CompareFlags flags = inline_smi_code
           ? NO_SMI_COMPARE_IN_STUB
           : NO_COMPARE_FLAGS;
-      CompareStub stub(cc, strict, flags, r1, r0);
+      CompareStub stub(cond, strict, flags, r1, r0);
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ cmp(r0, Operand(0, RelocInfo::NONE));
-      Split(cc, if_true, if_false, fall_through);
+      Split(cond, if_true, if_false, fall_through);
     }
   }
 
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 51a8149..d74468c 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -95,13 +95,13 @@
   __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
   __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
                      (1 << Map::kHasNamedInterceptor)));
-  __ b(nz, miss);
+  __ b(ne, miss);
 
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
   __ cmp(t1, ip);
-  __ b(nz, miss);
+  __ b(ne, miss);
 }
 
 
@@ -379,7 +379,7 @@
 }
 
 
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -388,7 +388,8 @@
   // -----------------------------------
   Label miss;
 
-  StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
+                                         support_wrappers);
   // Cache miss: Jump to runtime.
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -419,14 +420,14 @@
                                            int interceptor_bit,
                                            Label* slow) {
   // Check that the object isn't a smi.
-  __ BranchOnSmi(receiver, slow);
+  __ JumpIfSmi(receiver, slow);
   // Get the map of the receiver.
   __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check bit field.
   __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
   __ tst(scratch,
          Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ b(nz, slow);
+  __ b(ne, slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
   // we enter the runtime system to make sure that indexing into string
@@ -749,7 +750,7 @@
   Label index_smi, index_string;
 
   // Check that the key is a smi.
-  __ BranchOnNotSmi(r2, &check_string);
+  __ JumpIfNotSmi(r2, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
@@ -1165,7 +1166,7 @@
   Register receiver = r1;
 
   // Check that the key is a smi.
-  __ BranchOnNotSmi(key, &check_string);
+  __ JumpIfNotSmi(key, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
@@ -1346,7 +1347,7 @@
   Label slow;
 
   // Check that the receiver isn't a smi.
-  __ BranchOnSmi(r1, &slow);
+  __ JumpIfSmi(r1, &slow);
 
   // Check that the key is an array index, that is Uint32.
   __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
@@ -1470,7 +1471,7 @@
   __ b(ne, &slow);
   // Check that the value is a smi. If a conversion is needed call into the
   // runtime to convert and clamp.
-  __ BranchOnNotSmi(value, &slow);
+  __ JumpIfNotSmi(value, &slow);
   __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the key.
   __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
   __ cmp(r4, Operand(ip));
@@ -1589,7 +1590,7 @@
   Register scratch = r3;
 
   // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, &miss);
+  __ JumpIfSmi(receiver, &miss);
 
   // Check that the object is a JS array.
   __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
@@ -1603,7 +1604,7 @@
   __ b(ne, &miss);
 
   // Check that value is a smi.
-  __ BranchOnNotSmi(value, &miss);
+  __ JumpIfNotSmi(value, &miss);
 
   // Prepare tail call to StoreIC_ArrayLength.
   __ Push(receiver, value);
@@ -1673,7 +1674,7 @@
       return ge;
     default:
       UNREACHABLE();
-      return no_condition;
+      return kNoCondition;
   }
 }
 
@@ -1704,7 +1705,7 @@
 
 
 void PatchInlinedSmiCode(Address address) {
-  UNIMPLEMENTED();
+  // Currently there is no smi inlining in the ARM full code generator.
 }
 
 
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index c6eb628..b9e6ebf 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -76,7 +76,7 @@
 }
 
 
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+void JumpTarget::DoBranch(Condition cond, Hint ignored) {
   ASSERT(cgen()->has_valid_frame());
 
   if (entry_frame_set_) {
@@ -86,7 +86,7 @@
       ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
     }
     // We have an expected frame to merge to on the backward edge.
-    cgen()->frame()->MergeTo(&entry_frame_, cc);
+    cgen()->frame()->MergeTo(&entry_frame_, cond);
   } else {
     // Clone the current frame to use as the expected one at the target.
     set_entry_frame(cgen()->frame());
@@ -98,8 +98,8 @@
     // frame with less precise type info branches to them.
     ASSERT(direction_ != FORWARD_ONLY);
   }
-  __ b(cc, &entry_label_);
-  if (cc == al) {
+  __ b(cond, &entry_label_);
+  if (cond == al) {
     cgen()->DeleteFrame();
   }
 }
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 5a63144..c458138 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -820,6 +820,7 @@
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
+
 void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
   ASSERT(is_building());
   current_block_ = block;
@@ -1027,8 +1028,8 @@
     } else if (v->IsInstanceOf()) {
       HInstanceOf* instance_of = HInstanceOf::cast(v);
       LInstruction* result =
-          new LInstanceOfAndBranch(Use(instance_of->left()),
-                                   Use(instance_of->right()));
+          new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
+                                   UseFixed(instance_of->right(), r1));
       return MarkAsCall(result, instr);
     } else if (v->IsTypeofIs()) {
       HTypeofIs* typeof_is = HTypeofIs::cast(v);
@@ -1130,7 +1131,7 @@
     case kMathAbs:
       return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
     case kMathFloor:
-      return AssignEnvironment(DefineAsRegister(result));
+      return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
     case kMathSqrt:
       return DefineSameAsFirst(result);
     case kMathRound:
@@ -1601,7 +1602,14 @@
 
 
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+  if (instr->check_hole_value()) {
+    LOperand* temp = TempRegister();
+    LOperand* value = UseRegister(instr->value());
+    return AssignEnvironment(new LStoreGlobal(value, temp));
+  } else {
+    LOperand* value = UseRegisterAtStart(instr->value());
+    return new LStoreGlobal(value, NULL);
+  }
 }
 
 
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index eb90b8c..3de5832 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -1254,10 +1254,11 @@
 };
 
 
-class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
  public:
-  explicit LStoreGlobal(LOperand* value) {
+  LStoreGlobal(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index f34724c..1ccad17 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -661,7 +661,7 @@
     return;
   }
 
-  if (cc == no_condition) {
+  if (cc == kNoCondition) {
     if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
@@ -736,37 +736,40 @@
 }
 
 
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               int deoptimization_index) {
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    int deoptimization_index) {
   const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-                                                    deoptimization_index);
+      kind, arguments, deoptimization_index);
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
       safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
     }
   }
+  if (kind & Safepoint::kWithRegisters) {
+    // Register cp always contains a pointer to the context.
+    safepoint.DefinePointerRegister(cp);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
 }
 
 
 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                             int arguments,
                                             int deoptimization_index) {
-  const ZoneList<LOperand*>* operands = pointers->operands();
-  Safepoint safepoint =
-      safepoints_.DefineSafepointWithRegisters(
-          masm(), arguments, deoptimization_index);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
-    } else if (pointer->IsRegister()) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
-    }
-  }
-  // Register cp always contains a pointer to the context.
-  safepoint.DefinePointerRegister(cp);
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+      deoptimization_index);
 }
 
 
@@ -774,20 +777,8 @@
     LPointerMap* pointers,
     int arguments,
     int deoptimization_index) {
-  const ZoneList<LOperand*>* operands = pointers->operands();
-  Safepoint safepoint =
-      safepoints_.DefineSafepointWithRegistersAndDoubles(
-          masm(), arguments, deoptimization_index);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
-    } else if (pointer->IsRegister()) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
-    }
-  }
-  // Register cp always contains a pointer to the context.
-  safepoint.DefinePointerRegister(cp);
+  RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
+      deoptimization_index);
 }
 
 
@@ -1080,7 +1071,7 @@
   __ bind(deferred->exit());
 
   // If the result in r0 is a Smi, untag it, else deoptimize.
-  __ BranchOnNotSmi(result, &deoptimize);
+  __ JumpIfNotSmi(result, &deoptimize);
   __ SmiUntag(result);
 
   __ b(al, &done);
@@ -1160,7 +1151,7 @@
   __ bind(deferred->exit());
 
   // If the result in r0 is a Smi, untag it, else deoptimize.
-  __ BranchOnNotSmi(result, &deoptimize);
+  __ JumpIfNotSmi(result, &deoptimize);
   __ SmiUntag(result);
   __ b(&done);
 
@@ -1216,7 +1207,7 @@
     __ b(ne, &done);
     if (instr->InputAt(1)->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) {
-        DeoptimizeIf(no_condition, instr->environment());
+        DeoptimizeIf(kNoCondition, instr->environment());
       }
     } else {
       // Test the non-zero operand for negative sign.
@@ -1483,7 +1474,7 @@
   if (r.IsInteger32()) {
     Register reg = ToRegister(instr->InputAt(0));
     __ cmp(reg, Operand(0));
-    EmitBranch(true_block, false_block, nz);
+    EmitBranch(true_block, false_block, ne);
   } else if (r.IsDouble()) {
     DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
     Register scratch = scratch0();
@@ -1541,7 +1532,7 @@
       __ CallStub(&stub);
       __ cmp(reg, Operand(0));
       __ ldm(ia_w, sp, saved_regs);
-      EmitBranch(true_block, false_block, nz);
+      EmitBranch(true_block, false_block, ne);
     }
   }
 }
@@ -1593,7 +1584,7 @@
 
 
 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
-  Condition cond = no_condition;
+  Condition cond = kNoCondition;
   switch (op) {
     case Token::EQ:
     case Token::EQ_STRICT:
@@ -1730,7 +1721,7 @@
                                  Register temp2,
                                  Label* is_not_object,
                                  Label* is_object) {
-  __ BranchOnSmi(input, is_not_object);
+  __ JumpIfSmi(input, is_not_object);
 
   __ LoadRoot(temp1, Heap::kNullValueRootIndex);
   __ cmp(input, temp1);
@@ -2000,7 +1991,16 @@
 
 
 void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
-  Abort("DoInstanceOfAndBranch unimplemented.");
+  ASSERT(ToRegister(instr->InputAt(0)).is(r0));  // Object is in r0.
+  ASSERT(ToRegister(instr->InputAt(1)).is(r1));  // Function is in r1.
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ tst(r0, Operand(r0));
+  EmitBranch(true_block, false_block, eq);
 }
 
 
@@ -2033,7 +2033,7 @@
   ASSERT(result.is(r0));
 
   // A Smi is not instance of anything.
-  __ BranchOnSmi(object, &false_result);
+  __ JumpIfSmi(object, &false_result);
 
   // This is the inlined call site instanceof cache. The two occurences of the
   // hole value will be patched to the last map/result pair generated by the
@@ -2136,7 +2136,7 @@
       return ge;
     default:
       UNREACHABLE();
-      return no_condition;
+      return kNoCondition;
   }
 }
 
@@ -2195,8 +2195,26 @@
 
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
-  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
-  __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+  Register scratch = scratch0();
+
+  // Load the cell.
+  __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+
+  // If the cell we are storing to contains the hole it could have
+  // been deleted from the property dictionary. In that case, we need
+  // to update the property details in the property dictionary to mark
+  // it as no longer deleted.
+  if (instr->hydrogen()->check_hole_value()) {
+    Register scratch2 = ToRegister(instr->TempAt(0));
+    __ ldr(scratch2,
+           FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(scratch2, ip);
+    DeoptimizeIf(eq, instr->environment());
+  }
+
+  // Store the value.
+  __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
 }
 
 
@@ -2609,7 +2627,7 @@
         new DeferredMathAbsTaggedHeapNumber(this, instr);
     Register input = ToRegister(instr->InputAt(0));
     // Smi check.
-    __ BranchOnNotSmi(input, deferred->entry());
+    __ JumpIfNotSmi(input, deferred->entry());
     // If smi, handle it directly.
     EmitIntegerMathAbs(instr);
     __ bind(deferred->exit());
@@ -3556,7 +3574,7 @@
                                  Label* false_label,
                                  Register input,
                                  Handle<String> type_name) {
-  Condition final_branch_condition = no_condition;
+  Condition final_branch_condition = kNoCondition;
   Register scratch = scratch0();
   if (type_name->Equals(Heap::number_symbol())) {
     __ tst(input, Operand(kSmiTagMask));
@@ -3641,7 +3659,7 @@
 
 
 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  DeoptimizeIf(no_condition, instr->environment());
+  DeoptimizeIf(kNoCondition, instr->environment());
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 3b2ad80..27a72f2 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -223,6 +223,10 @@
   void DoMathSqrt(LUnaryMathOperation* instr);
 
   // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       int deoptimization_index);
   void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 7431f3b..66cfdca 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -318,7 +318,7 @@
   CheckConstPool(true, true);
   add(pc, pc, Operand(index,
                       LSL,
-                      assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
+                      Instruction::kInstrSizeLog2 - kSmiTagSize));
   BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
   nop();  // Jump table alignment.
   for (int i = 0; i < targets.length(); i++) {
@@ -369,12 +369,12 @@
 
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
-                                Condition cc,
+                                Condition cond,
                                 Label* branch) {
-  ASSERT(cc == eq || cc == ne);
+  ASSERT(cond == eq || cond == ne);
   and_(scratch, object, Operand(ExternalReference::new_space_mask()));
   cmp(scratch, Operand(ExternalReference::new_space_start()));
-  b(cc, branch);
+  b(cond, branch);
 }
 
 
@@ -926,7 +926,7 @@
   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   tst(scratch, Operand(kIsNotStringMask));
-  b(nz, fail);
+  b(ne, fail);
 }
 
 
@@ -1385,7 +1385,7 @@
                               Label* fail,
                               bool is_heap_object) {
   if (!is_heap_object) {
-    BranchOnSmi(obj, fail);
+    JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   mov(ip, Operand(map));
@@ -1400,7 +1400,7 @@
                               Label* fail,
                               bool is_heap_object) {
   if (!is_heap_object) {
-    BranchOnSmi(obj, fail);
+    JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   LoadRoot(ip, index);
@@ -1414,7 +1414,7 @@
                                              Register scratch,
                                              Label* miss) {
   // Check that the receiver isn't a smi.
-  BranchOnSmi(function, miss);
+  JumpIfSmi(function, miss);
 
   // Check that the function really is a function.  Load map into result reg.
   CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
@@ -1513,7 +1513,7 @@
   Label done;
   if ((flags & OBJECT_NOT_SMI) == 0) {
     Label not_smi;
-    BranchOnNotSmi(object, &not_smi);
+    JumpIfNotSmi(object, &not_smi);
     // Remove smi tag and convert to double.
     mov(scratch1, Operand(object, ASR, kSmiTagSize));
     vmov(scratch3, scratch1);
@@ -1806,9 +1806,9 @@
 }
 
 
-void MacroAssembler::Assert(Condition cc, const char* msg) {
+void MacroAssembler::Assert(Condition cond, const char* msg) {
   if (FLAG_debug_code)
-    Check(cc, msg);
+    Check(cond, msg);
 }
 
 
@@ -1841,9 +1841,9 @@
 }
 
 
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cond, const char* msg) {
   Label L;
-  b(cc, &L);
+  b(cond, &L);
   Abort(msg);
   // will not return here
   bind(&L);
@@ -1939,7 +1939,7 @@
 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                       Register reg2,
                                       Label* on_not_both_smi) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(reg1, Operand(kSmiTagMask));
   tst(reg2, Operand(kSmiTagMask), eq);
   b(ne, on_not_both_smi);
@@ -1949,7 +1949,7 @@
 void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                      Register reg2,
                                      Label* on_either_smi) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(reg1, Operand(kSmiTagMask));
   tst(reg2, Operand(kSmiTagMask), ne);
   b(eq, on_either_smi);
@@ -1957,19 +1957,30 @@
 
 
 void MacroAssembler::AbortIfSmi(Register object) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(object, Operand(kSmiTagMask));
   Assert(ne, "Operand is a smi");
 }
 
 
 void MacroAssembler::AbortIfNotSmi(Register object) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(object, Operand(kSmiTagMask));
   Assert(eq, "Operand is not smi");
 }
 
 
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Register heap_number_map,
+                                         Register scratch,
+                                         Label* on_not_heap_number) {
+  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  cmp(scratch, heap_number_map);
+  b(ne, on_not_heap_number);
+}
+
+
 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
     Register first,
     Register second,
@@ -1996,7 +2007,7 @@
                                                          Register scratch2,
                                                          Label* failure) {
   // Check that neither is a smi.
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   and_(scratch1, first, Operand(second));
   tst(scratch1, Operand(kSmiTagMask));
   b(eq, failure);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 7392d36..e2b1db8 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -139,7 +139,7 @@
   // scratch can be object itself, but it will be clobbered.
   void InNewSpace(Register object,
                   Register scratch,
-                  Condition cc,  // eq for new space, ne otherwise
+                  Condition cond,  // eq for new space, ne otherwise
                   Label* branch);
 
 
@@ -545,16 +545,6 @@
   }
 
 
-  inline void BranchOnSmi(Register value, Label* smi_label) {
-    tst(value, Operand(kSmiTagMask));
-    b(eq, smi_label);
-  }
-
-  inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
-    tst(value, Operand(kSmiTagMask));
-    b(ne, not_smi_label);
-  }
-
   // Generates code for reporting that an illegal operation has
   // occurred.
   void IllegalOperation(int num_arguments);
@@ -695,14 +685,14 @@
   // ---------------------------------------------------------------------------
   // Debugging
 
-  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Calls Abort(msg) if the condition cond is not satisfied.
   // Use --debug_code to enable.
-  void Assert(Condition cc, const char* msg);
+  void Assert(Condition cond, const char* msg);
   void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
   void AssertFastElements(Register elements);
 
   // Like Assert(), but always enabled.
-  void Check(Condition cc, const char* msg);
+  void Check(Condition cond, const char* msg);
 
   // Print a message to stdout and abort execution.
   void Abort(const char* msg);
@@ -719,6 +709,9 @@
   void SmiTag(Register reg, SBit s = LeaveCC) {
     add(reg, reg, Operand(reg), s);
   }
+  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
+    add(dst, src, Operand(src), s);
+  }
 
   // Try to convert int32 to smi. If the value is to large, preserve
   // the original value and jump to not_a_smi. Destroys scratch and
@@ -733,7 +726,20 @@
   void SmiUntag(Register reg) {
     mov(reg, Operand(reg, ASR, kSmiTagSize));
   }
+  void SmiUntag(Register dst, Register src) {
+    mov(dst, Operand(src, ASR, kSmiTagSize));
+  }
 
+  // Jump if the register contains a smi.
+  inline void JumpIfSmi(Register value, Label* smi_label) {
+    tst(value, Operand(kSmiTagMask));
+    b(eq, smi_label);
+  }
+  // Jump if the register contains a non-smi.
+  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+    tst(value, Operand(kSmiTagMask));
+    b(ne, not_smi_label);
+  }
   // Jump if either of the registers contain a non-smi.
   void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
   // Jump if either of the registers contain a smi.
@@ -744,6 +750,14 @@
   void AbortIfNotSmi(Register object);
 
   // ---------------------------------------------------------------------------
+  // HeapNumber utilities
+
+  void JumpIfNotHeapNumber(Register object,
+                           Register heap_number_map,
+                           Register scratch,
+                           Label* on_not_heap_number);
+
+  // ---------------------------------------------------------------------------
   // String utilities
 
   // Checks if both objects are sequential ASCII strings and jumps to label
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 138e8f8..296b2b4 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -40,14 +40,8 @@
 #if defined(USE_SIMULATOR)
 
 // Only build the simulator if not compiling for real ARM hardware.
-namespace assembler {
-namespace arm {
-
-using ::v8::internal::Object;
-using ::v8::internal::PrintF;
-using ::v8::internal::OS;
-using ::v8::internal::ReadLine;
-using ::v8::internal::DeleteArray;
+namespace v8 {
+namespace internal {
 
 // This macro provides a platform independent use of sscanf. The reason for
 // SScanF not being implemented in a platform independent way through
@@ -62,14 +56,13 @@
   explicit Debugger(Simulator* sim);
   ~Debugger();
 
-  void Stop(Instr* instr);
+  void Stop(Instruction* instr);
   void Debug();
 
  private:
-  static const instr_t kBreakpointInstr =
-      ((AL << 28) | (7 << 25) | (1 << 24) | break_point);
-  static const instr_t kNopInstr =
-      ((AL << 28) | (13 << 21));
+  static const Instr kBreakpointInstr =
+      (al | (7*B25) | (1*B24) | kBreakpoint);
+  static const Instr kNopInstr = (al | (13*B21));
 
   Simulator* sim_;
 
@@ -80,8 +73,8 @@
   bool GetVFPDoubleValue(const char* desc, double* value);
 
   // Set or delete a breakpoint. Returns true if successful.
-  bool SetBreakpoint(Instr* breakpc);
-  bool DeleteBreakpoint(Instr* breakpc);
+  bool SetBreakpoint(Instruction* breakpc);
+  bool DeleteBreakpoint(Instruction* breakpc);
 
   // Undo and redo all breakpoints. This is needed to bracket disassembly and
   // execution to skip past breakpoints when run from the debugger.
@@ -112,12 +105,12 @@
 }
 
 
-void Debugger::Stop(Instr* instr) {
+void Debugger::Stop(Instruction* instr) {
   // Get the stop code.
-  uint32_t code = instr->SvcField() & kStopCodeMask;
+  uint32_t code = instr->SvcValue() & kStopCodeMask;
   // Retrieve the encoded address, which comes just after this stop.
   char** msg_address =
-    reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
+    reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
   char* msg = *msg_address;
   ASSERT(msg != NULL);
 
@@ -133,9 +126,9 @@
     }
     // Overwrite the instruction and address with nops.
     instr->SetInstructionBits(kNopInstr);
-    reinterpret_cast<Instr*>(msg_address)->SetInstructionBits(kNopInstr);
+    reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
   }
-  sim_->set_pc(sim_->get_pc() + 2 * Instr::kInstrSize);
+  sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
 }
 
 #else  // ndef GENERATED_CODE_COVERAGE
@@ -144,11 +137,12 @@
 }
 
 
-void Debugger::Stop(Instr* instr) {
+void Debugger::Stop(Instruction* instr) {
   // Get the stop code.
-  uint32_t code = instr->SvcField() & kStopCodeMask;
+  uint32_t code = instr->SvcValue() & kStopCodeMask;
   // Retrieve the encoded address, which comes just after this stop.
-  char* msg = *reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
+  char* msg = *reinterpret_cast<char**>(sim_->get_pc()
+                                        + Instruction::kInstrSize);
   // Update this stop description.
   if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
     sim_->watched_stops[code].desc = msg;
@@ -159,7 +153,7 @@
   } else {
     PrintF("Simulator hit %s\n", msg);
   }
-  sim_->set_pc(sim_->get_pc() + 2 * Instr::kInstrSize);
+  sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
   Debug();
 }
 #endif
@@ -217,7 +211,7 @@
 }
 
 
-bool Debugger::SetBreakpoint(Instr* breakpc) {
+bool Debugger::SetBreakpoint(Instruction* breakpc) {
   // Check if a breakpoint can be set. If not return without any side-effects.
   if (sim_->break_pc_ != NULL) {
     return false;
@@ -232,7 +226,7 @@
 }
 
 
-bool Debugger::DeleteBreakpoint(Instr* breakpc) {
+bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
   if (sim_->break_pc_ != NULL) {
     sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
   }
@@ -304,10 +298,10 @@
                         "%" XSTR(ARG_SIZE) "s",
                         cmd, arg1, arg2);
       if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
-        sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+        sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
       } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
         // Execute the one instruction we broke at with breakpoints disabled.
-        sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+        sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
         // Leave the debugger shell.
         done = true;
       } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
@@ -402,20 +396,20 @@
 
         if (argc == 1) {
           cur = reinterpret_cast<byte*>(sim_->get_pc());
-          end = cur + (10 * Instr::kInstrSize);
+          end = cur + (10 * Instruction::kInstrSize);
         } else if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
             cur = reinterpret_cast<byte*>(sim_->get_pc());
             // Disassemble <arg1> instructions.
-            end = cur + (value * Instr::kInstrSize);
+            end = cur + (value * Instruction::kInstrSize);
           }
         } else {
           int32_t value1;
           int32_t value2;
           if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
             cur = reinterpret_cast<byte*>(value1);
-            end = cur + (value2 * Instr::kInstrSize);
+            end = cur + (value2 * Instruction::kInstrSize);
           }
         }
 
@@ -433,7 +427,7 @@
         if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
-            if (!SetBreakpoint(reinterpret_cast<Instr*>(value))) {
+            if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
               PrintF("setting breakpoint failed\n");
             }
           } else {
@@ -458,10 +452,10 @@
         PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
       } else if (strcmp(cmd, "stop") == 0) {
         int32_t value;
-        intptr_t stop_pc = sim_->get_pc() - 2 * Instr::kInstrSize;
-        Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
-        Instr* msg_address =
-          reinterpret_cast<Instr*>(stop_pc + Instr::kInstrSize);
+        intptr_t stop_pc = sim_->get_pc() - 2 * Instruction::kInstrSize;
+        Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+        Instruction* msg_address =
+          reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
         if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
           // Remove the current stop.
           if (sim_->isStopInstruction(stop_instr)) {
@@ -646,7 +640,7 @@
 }
 
 
-void Simulator::CheckICache(Instr* instr) {
+void Simulator::CheckICache(Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -659,7 +653,7 @@
     // Check that the data in memory matches the contents of the I-cache.
     CHECK(memcmp(reinterpret_cast<void*>(instr),
                  cache_page->CachedData(offset),
-                 Instr::kInstrSize) == 0);
+                 Instruction::kInstrSize) == 0);
   } else {
     // Cache miss.  Load memory into the cache.
     memcpy(cached_line, line, CachePage::kLineLength);
@@ -752,12 +746,12 @@
  public:
   Redirection(void* external_function, bool fp_return)
       : external_function_(external_function),
-        swi_instruction_((AL << 28) | (0xf << 24) | call_rt_redirected),
+        swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
         fp_return_(fp_return),
         next_(list_) {
     Simulator::current()->
         FlushICache(reinterpret_cast<void*>(&swi_instruction_),
-                      Instr::kInstrSize);
+                      Instruction::kInstrSize);
     list_ = this;
   }
 
@@ -776,7 +770,7 @@
     return new Redirection(external_function, fp_return);
   }
 
-  static Redirection* FromSwiInstruction(Instr* swi_instruction) {
+  static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
     char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
     char* addr_of_redirection =
         addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
@@ -835,7 +829,7 @@
   // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
   if (reg >= num_registers) return 0;
   // End stupid code.
-  return registers_[reg] + ((reg == pc) ? Instr::kPCReadOffset : 0);
+  return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
 }
 
 
@@ -1001,7 +995,7 @@
 // targets that don't support unaligned loads and stores.
 
 
-int Simulator::ReadW(int32_t addr, Instr* instr) {
+int Simulator::ReadW(int32_t addr, Instruction* instr) {
 #if V8_TARGET_CAN_READ_UNALIGNED
   intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
   return *ptr;
@@ -1017,7 +1011,7 @@
 }
 
 
-void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
+void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
 #if V8_TARGET_CAN_READ_UNALIGNED
   intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
   *ptr = value;
@@ -1034,7 +1028,7 @@
 }
 
 
-uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
+uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
 #if V8_TARGET_CAN_READ_UNALIGNED
   uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
   return *ptr;
@@ -1050,7 +1044,7 @@
 }
 
 
-int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
+int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
 #if V8_TARGET_CAN_READ_UNALIGNED
   int16_t* ptr = reinterpret_cast<int16_t*>(addr);
   return *ptr;
@@ -1066,7 +1060,7 @@
 }
 
 
-void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
+void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
 #if V8_TARGET_CAN_READ_UNALIGNED
   uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
   *ptr = value;
@@ -1083,7 +1077,7 @@
 }
 
 
-void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
+void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
 #if V8_TARGET_CAN_READ_UNALIGNED
   int16_t* ptr = reinterpret_cast<int16_t*>(addr);
   *ptr = value;
@@ -1168,7 +1162,7 @@
 
 
 // Unsupported instructions use Format to print an error and stop execution.
-void Simulator::Format(Instr* instr, const char* format) {
+void Simulator::Format(Instruction* instr, const char* format) {
   PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
          reinterpret_cast<intptr_t>(instr), format);
   UNIMPLEMENTED();
@@ -1177,23 +1171,23 @@
 
 // Checks if the current instruction should be executed based on its
 // condition bits.
-bool Simulator::ConditionallyExecute(Instr* instr) {
+bool Simulator::ConditionallyExecute(Instruction* instr) {
   switch (instr->ConditionField()) {
-    case EQ: return z_flag_;
-    case NE: return !z_flag_;
-    case CS: return c_flag_;
-    case CC: return !c_flag_;
-    case MI: return n_flag_;
-    case PL: return !n_flag_;
-    case VS: return v_flag_;
-    case VC: return !v_flag_;
-    case HI: return c_flag_ && !z_flag_;
-    case LS: return !c_flag_ || z_flag_;
-    case GE: return n_flag_ == v_flag_;
-    case LT: return n_flag_ != v_flag_;
-    case GT: return !z_flag_ && (n_flag_ == v_flag_);
-    case LE: return z_flag_ || (n_flag_ != v_flag_);
-    case AL: return true;
+    case eq: return z_flag_;
+    case ne: return !z_flag_;
+    case cs: return c_flag_;
+    case cc: return !c_flag_;
+    case mi: return n_flag_;
+    case pl: return !n_flag_;
+    case vs: return v_flag_;
+    case vc: return !v_flag_;
+    case hi: return c_flag_ && !z_flag_;
+    case ls: return !c_flag_ || z_flag_;
+    case ge: return n_flag_ == v_flag_;
+    case lt: return n_flag_ != v_flag_;
+    case gt: return !z_flag_ && (n_flag_ == v_flag_);
+    case le: return z_flag_ || (n_flag_ != v_flag_);
+    case al: return true;
     default: UNREACHABLE();
   }
   return false;
@@ -1295,10 +1289,10 @@
 
 // Addressing Mode 1 - Data-processing operands:
 // Get the value based on the shifter_operand with register.
-int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
-  Shift shift = instr->ShiftField();
-  int shift_amount = instr->ShiftAmountField();
-  int32_t result = get_register(instr->RmField());
+int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
+  ShiftOp shift = instr->ShiftField();
+  int shift_amount = instr->ShiftAmountValue();
+  int32_t result = get_register(instr->RmValue());
   if (instr->Bit(4) == 0) {
     // by immediate
     if ((shift == ROR) && (shift_amount == 0)) {
@@ -1362,7 +1356,7 @@
     }
   } else {
     // by register
-    int rs = instr->RsField();
+    int rs = instr->RsValue();
     shift_amount = get_register(rs) &0xff;
     switch (shift) {
       case ASR: {
@@ -1439,9 +1433,9 @@
 
 // Addressing Mode 1 - Data-processing operands:
 // Get the value based on the shifter_operand with immediate.
-int32_t Simulator::GetImm(Instr* instr, bool* carry_out) {
-  int rotate = instr->RotateField() * 2;
-  int immed8 = instr->Immed8Field();
+int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
+  int rotate = instr->RotateValue() * 2;
+  int immed8 = instr->Immed8Value();
   int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
   *carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
   return imm;
@@ -1461,36 +1455,32 @@
 
 
 // Addressing Mode 4 - Load and Store Multiple
-void Simulator::HandleRList(Instr* instr, bool load) {
-  int rn = instr->RnField();
+void Simulator::HandleRList(Instruction* instr, bool load) {
+  int rn = instr->RnValue();
   int32_t rn_val = get_register(rn);
-  int rlist = instr->RlistField();
+  int rlist = instr->RlistValue();
   int num_regs = count_bits(rlist);
 
   intptr_t start_address = 0;
   intptr_t end_address = 0;
   switch (instr->PUField()) {
-    case 0: {
-      // Print("da");
+    case da_x: {
       UNIMPLEMENTED();
       break;
     }
-    case 1: {
-      // Print("ia");
+    case ia_x: {
       start_address = rn_val;
       end_address = rn_val + (num_regs * 4) - 4;
       rn_val = rn_val + (num_regs * 4);
       break;
     }
-    case 2: {
-      // Print("db");
+    case db_x: {
       start_address = rn_val - (num_regs * 4);
       end_address = rn_val - 4;
       rn_val = start_address;
       break;
     }
-    case 3: {
-      // Print("ib");
+    case ib_x: {
       start_address = rn_val + 4;
       end_address = rn_val + (num_regs * 4);
       rn_val = end_address;
@@ -1541,10 +1531,10 @@
 
 // Software interrupt instructions are used by the simulator to call into the
 // C-based V8 runtime.
-void Simulator::SoftwareInterrupt(Instr* instr) {
-  int svc = instr->SvcField();
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+  int svc = instr->SvcValue();
   switch (svc) {
-    case call_rt_redirected: {
+    case kCallRtRedirected: {
       // Check if stack is aligned. Error if not aligned is reported below to
       // include information on the function called.
       bool stack_aligned =
@@ -1611,7 +1601,7 @@
       set_pc(get_register(lr));
       break;
     }
-    case break_point: {
+    case kBreakpoint: {
       Debugger dbg(this);
       dbg.Debug();
       break;
@@ -1629,7 +1619,7 @@
           Debugger dbg(this);
           dbg.Stop(instr);
         } else {
-          set_pc(get_pc() + 2 * Instr::kInstrSize);
+          set_pc(get_pc() + 2 * Instruction::kInstrSize);
         }
       } else {
         // This is not a valid svc code.
@@ -1642,8 +1632,8 @@
 
 
 // Stop helper functions.
-bool Simulator::isStopInstruction(Instr* instr) {
-  return (instr->Bits(27, 24) == 0xF) && (instr->SvcField() >= stop);
+bool Simulator::isStopInstruction(Instruction* instr) {
+  return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
 }
 
 
@@ -1717,17 +1707,17 @@
 
 // Instruction types 0 and 1 are both rolled into one function because they
 // only differ in the handling of the shifter_operand.
-void Simulator::DecodeType01(Instr* instr) {
-  int type = instr->TypeField();
+void Simulator::DecodeType01(Instruction* instr) {
+  int type = instr->TypeValue();
   if ((type == 0) && instr->IsSpecialType0()) {
     // multiply instruction or extra loads and stores
     if (instr->Bits(7, 4) == 9) {
       if (instr->Bit(24) == 0) {
         // Raw field decoding here. Multiply instructions have their Rd in
         // funny places.
-        int rn = instr->RnField();
-        int rm = instr->RmField();
-        int rs = instr->RsField();
+        int rn = instr->RnValue();
+        int rm = instr->RmValue();
+        int rs = instr->RsValue();
         int32_t rs_val = get_register(rs);
         int32_t rm_val = get_register(rm);
         if (instr->Bit(23) == 0) {
@@ -1761,7 +1751,7 @@
           //             at a very detailed level.)
           // Format(instr, "'um'al'cond's 'rd, 'rn, 'rs, 'rm");
           int rd_hi = rn;  // Remap the rn field to the RdHi register.
-          int rd_lo = instr->RdField();
+          int rd_lo = instr->RdValue();
           int32_t hi_res = 0;
           int32_t lo_res = 0;
           if (instr->Bit(22) == 1) {
@@ -1789,15 +1779,15 @@
       }
     } else {
       // extra load/store instructions
-      int rd = instr->RdField();
-      int rn = instr->RnField();
+      int rd = instr->RdValue();
+      int rn = instr->RnValue();
       int32_t rn_val = get_register(rn);
       int32_t addr = 0;
       if (instr->Bit(22) == 0) {
-        int rm = instr->RmField();
+        int rm = instr->RmValue();
         int32_t rm_val = get_register(rm);
         switch (instr->PUField()) {
-          case 0: {
+          case da_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
             ASSERT(!instr->HasW());
             addr = rn_val;
@@ -1805,7 +1795,7 @@
             set_register(rn, rn_val);
             break;
           }
-          case 1: {
+          case ia_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
             ASSERT(!instr->HasW());
             addr = rn_val;
@@ -1813,7 +1803,7 @@
             set_register(rn, rn_val);
             break;
           }
-          case 2: {
+          case db_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
             rn_val -= rm_val;
             addr = rn_val;
@@ -1822,7 +1812,7 @@
             }
             break;
           }
-          case 3: {
+          case ib_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
             rn_val += rm_val;
             addr = rn_val;
@@ -1838,9 +1828,9 @@
           }
         }
       } else {
-        int32_t imm_val = (instr->ImmedHField() << 4) | instr->ImmedLField();
+        int32_t imm_val = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
         switch (instr->PUField()) {
-          case 0: {
+          case da_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
             ASSERT(!instr->HasW());
             addr = rn_val;
@@ -1848,7 +1838,7 @@
             set_register(rn, rn_val);
             break;
           }
-          case 1: {
+          case ia_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
             ASSERT(!instr->HasW());
             addr = rn_val;
@@ -1856,7 +1846,7 @@
             set_register(rn, rn_val);
             break;
           }
-          case 2: {
+          case db_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
             rn_val -= imm_val;
             addr = rn_val;
@@ -1865,7 +1855,7 @@
             }
             break;
           }
-          case 3: {
+          case ib_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
             rn_val += imm_val;
             addr = rn_val;
@@ -1922,15 +1912,15 @@
     }
   } else if ((type == 0) && instr->IsMiscType0()) {
     if (instr->Bits(22, 21) == 1) {
-      int rm = instr->RmField();
-      switch (instr->Bits(7, 4)) {
+      int rm = instr->RmValue();
+      switch (instr->BitField(7, 4)) {
         case BX:
           set_pc(get_register(rm));
           break;
         case BLX: {
           uint32_t old_pc = get_pc();
           set_pc(get_register(rm));
-          set_register(lr, old_pc + Instr::kInstrSize);
+          set_register(lr, old_pc + Instruction::kInstrSize);
           break;
         }
         case BKPT: {
@@ -1943,9 +1933,9 @@
           UNIMPLEMENTED();
       }
     } else if (instr->Bits(22, 21) == 3) {
-      int rm = instr->RmField();
-      int rd = instr->RdField();
-      switch (instr->Bits(7, 4)) {
+      int rm = instr->RmValue();
+      int rd = instr->RdValue();
+      switch (instr->BitField(7, 4)) {
         case CLZ: {
           uint32_t bits = get_register(rm);
           int leading_zeros = 0;
@@ -1968,15 +1958,15 @@
       UNIMPLEMENTED();
     }
   } else {
-    int rd = instr->RdField();
-    int rn = instr->RnField();
+    int rd = instr->RdValue();
+    int rn = instr->RnValue();
     int32_t rn_val = get_register(rn);
     int32_t shifter_operand = 0;
     bool shifter_carry_out = 0;
     if (type == 0) {
       shifter_operand = GetShiftRm(instr, &shifter_carry_out);
     } else {
-      ASSERT(instr->TypeField() == 1);
+      ASSERT(instr->TypeValue() == 1);
       shifter_operand = GetImm(instr, &shifter_carry_out);
     }
     int32_t alu_out;
@@ -2072,7 +2062,7 @@
           SetCFlag(shifter_carry_out);
         } else {
           // Format(instr, "movw'cond 'rd, 'imm").
-          alu_out = instr->ImmedMovwMovtField();
+          alu_out = instr->ImmedMovwMovtValue();
           set_register(rd, alu_out);
         }
         break;
@@ -2104,7 +2094,7 @@
         } else {
           // Format(instr, "movt'cond 'rd, 'imm").
           alu_out = (get_register(rd) & 0xffff) |
-              (instr->ImmedMovwMovtField() << 16);
+              (instr->ImmedMovwMovtValue() << 16);
           set_register(rd, alu_out);
         }
         break;
@@ -2183,14 +2173,14 @@
 }
 
 
-void Simulator::DecodeType2(Instr* instr) {
-  int rd = instr->RdField();
-  int rn = instr->RnField();
+void Simulator::DecodeType2(Instruction* instr) {
+  int rd = instr->RdValue();
+  int rn = instr->RnValue();
   int32_t rn_val = get_register(rn);
-  int32_t im_val = instr->Offset12Field();
+  int32_t im_val = instr->Offset12Value();
   int32_t addr = 0;
   switch (instr->PUField()) {
-    case 0: {
+    case da_x: {
       // Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
       ASSERT(!instr->HasW());
       addr = rn_val;
@@ -2198,7 +2188,7 @@
       set_register(rn, rn_val);
       break;
     }
-    case 1: {
+    case ia_x: {
       // Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
       ASSERT(!instr->HasW());
       addr = rn_val;
@@ -2206,7 +2196,7 @@
       set_register(rn, rn_val);
       break;
     }
-    case 2: {
+    case db_x: {
       // Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
       rn_val -= im_val;
       addr = rn_val;
@@ -2215,7 +2205,7 @@
       }
       break;
     }
-    case 3: {
+    case ib_x: {
       // Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
       rn_val += im_val;
       addr = rn_val;
@@ -2247,21 +2237,21 @@
 }
 
 
-void Simulator::DecodeType3(Instr* instr) {
-  int rd = instr->RdField();
-  int rn = instr->RnField();
+void Simulator::DecodeType3(Instruction* instr) {
+  int rd = instr->RdValue();
+  int rn = instr->RnValue();
   int32_t rn_val = get_register(rn);
   bool shifter_carry_out = 0;
   int32_t shifter_operand = GetShiftRm(instr, &shifter_carry_out);
   int32_t addr = 0;
   switch (instr->PUField()) {
-    case 0: {
+    case da_x: {
       ASSERT(!instr->HasW());
       Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
       UNIMPLEMENTED();
       break;
     }
-    case 1: {
+    case ia_x: {
       if (instr->HasW()) {
         ASSERT(instr->Bits(5, 4) == 0x1);
 
@@ -2270,7 +2260,7 @@
           int32_t sat_val = (1 << sat_pos) - 1;
           int32_t shift = instr->Bits(11, 7);
           int32_t shift_type = instr->Bit(6);
-          int32_t rm_val = get_register(instr->RmField());
+          int32_t rm_val = get_register(instr->RmValue());
           if (shift_type == 0) {  // LSL
             rm_val <<= shift;
           } else {  // ASR
@@ -2295,7 +2285,7 @@
       }
       break;
     }
-    case 2: {
+    case db_x: {
       // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
       addr = rn_val - shifter_operand;
       if (instr->HasW()) {
@@ -2303,7 +2293,7 @@
       }
       break;
     }
-    case 3: {
+    case ib_x: {
       if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
         uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
         uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
@@ -2312,16 +2302,16 @@
           if (instr->Bit(22)) {
             // ubfx - unsigned bitfield extract.
             uint32_t rm_val =
-                static_cast<uint32_t>(get_register(instr->RmField()));
+                static_cast<uint32_t>(get_register(instr->RmValue()));
             uint32_t extr_val = rm_val << (31 - msbit);
             extr_val = extr_val >> (31 - widthminus1);
-            set_register(instr->RdField(), extr_val);
+            set_register(instr->RdValue(), extr_val);
           } else {
             // sbfx - signed bitfield extract.
-            int32_t rm_val = get_register(instr->RmField());
+            int32_t rm_val = get_register(instr->RmValue());
             int32_t extr_val = rm_val << (31 - msbit);
             extr_val = extr_val >> (31 - widthminus1);
-            set_register(instr->RdField(), extr_val);
+            set_register(instr->RdValue(), extr_val);
           }
         } else {
           UNREACHABLE();
@@ -2333,18 +2323,18 @@
         if (msbit >= lsbit) {
           // bfc or bfi - bitfield clear/insert.
           uint32_t rd_val =
-              static_cast<uint32_t>(get_register(instr->RdField()));
+              static_cast<uint32_t>(get_register(instr->RdValue()));
           uint32_t bitcount = msbit - lsbit + 1;
           uint32_t mask = (1 << bitcount) - 1;
           rd_val &= ~(mask << lsbit);
-          if (instr->RmField() != 15) {
+          if (instr->RmValue() != 15) {
             // bfi - bitfield insert.
             uint32_t rm_val =
-                static_cast<uint32_t>(get_register(instr->RmField()));
+                static_cast<uint32_t>(get_register(instr->RmValue()));
             rm_val &= mask;
             rd_val |= rm_val << lsbit;
           }
-          set_register(instr->RdField(), rd_val);
+          set_register(instr->RdValue(), rd_val);
         } else {
           UNREACHABLE();
         }
@@ -2381,7 +2371,7 @@
 }
 
 
-void Simulator::DecodeType4(Instr* instr) {
+void Simulator::DecodeType4(Instruction* instr) {
   ASSERT(instr->Bit(22) == 0);  // only allowed to be set in privileged mode
   if (instr->HasL()) {
     // Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
@@ -2393,24 +2383,24 @@
 }
 
 
-void Simulator::DecodeType5(Instr* instr) {
+void Simulator::DecodeType5(Instruction* instr) {
   // Format(instr, "b'l'cond 'target");
-  int off = (instr->SImmed24Field() << 2);
+  int off = (instr->SImmed24Value() << 2);
   intptr_t pc_address = get_pc();
   if (instr->HasLink()) {
-    set_register(lr, pc_address + Instr::kInstrSize);
+    set_register(lr, pc_address + Instruction::kInstrSize);
   }
   int pc_reg = get_register(pc);
   set_pc(pc_reg + off);
 }
 
 
-void Simulator::DecodeType6(Instr* instr) {
+void Simulator::DecodeType6(Instruction* instr) {
   DecodeType6CoprocessorIns(instr);
 }
 
 
-void Simulator::DecodeType7(Instr* instr) {
+void Simulator::DecodeType7(Instruction* instr) {
   if (instr->Bit(24) == 1) {
     SoftwareInterrupt(instr);
   } else {
@@ -2419,7 +2409,7 @@
 }
 
 
-// void Simulator::DecodeTypeVFP(Instr* instr)
+// void Simulator::DecodeTypeVFP(Instruction* instr)
 // The Following ARMv7 VFPv instructions are currently supported.
 // vmov :Sn = Rt
 // vmov :Rt = Sn
@@ -2432,47 +2422,47 @@
 // vcmp(Dd, Dm)
 // vmrs
 // Dd = vsqrt(Dm)
-void Simulator::DecodeTypeVFP(Instr* instr) {
-  ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+void Simulator::DecodeTypeVFP(Instruction* instr) {
+  ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
   ASSERT(instr->Bits(11, 9) == 0x5);
 
   // Obtain double precision register codes.
-  int vm = instr->VFPMRegCode(kDoublePrecision);
-  int vd = instr->VFPDRegCode(kDoublePrecision);
-  int vn = instr->VFPNRegCode(kDoublePrecision);
+  int vm = instr->VFPMRegValue(kDoublePrecision);
+  int vd = instr->VFPDRegValue(kDoublePrecision);
+  int vn = instr->VFPNRegValue(kDoublePrecision);
 
   if (instr->Bit(4) == 0) {
-    if (instr->Opc1Field() == 0x7) {
+    if (instr->Opc1Value() == 0x7) {
       // Other data processing instructions
-      if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+      if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
         // vmov register to register.
-        if (instr->SzField() == 0x1) {
-          int m = instr->VFPMRegCode(kDoublePrecision);
-          int d = instr->VFPDRegCode(kDoublePrecision);
+        if (instr->SzValue() == 0x1) {
+          int m = instr->VFPMRegValue(kDoublePrecision);
+          int d = instr->VFPDRegValue(kDoublePrecision);
           set_d_register_from_double(d, get_double_from_d_register(m));
         } else {
-          int m = instr->VFPMRegCode(kSinglePrecision);
-          int d = instr->VFPDRegCode(kSinglePrecision);
+          int m = instr->VFPMRegValue(kSinglePrecision);
+          int d = instr->VFPDRegValue(kSinglePrecision);
           set_s_register_from_float(d, get_float_from_s_register(m));
         }
-      } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+      } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
         DecodeVCVTBetweenDoubleAndSingle(instr);
-      } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+      } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
         DecodeVCVTBetweenFloatingPointAndInteger(instr);
-      } else if (((instr->Opc2Field() >> 1) == 0x6) &&
-                 (instr->Opc3Field() & 0x1)) {
+      } else if (((instr->Opc2Value() >> 1) == 0x6) &&
+                 (instr->Opc3Value() & 0x1)) {
         DecodeVCVTBetweenFloatingPointAndInteger(instr);
-      } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
-                 (instr->Opc3Field() & 0x1)) {
+      } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+                 (instr->Opc3Value() & 0x1)) {
         DecodeVCMP(instr);
-      } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
+      } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
         // vsqrt
         double dm_value = get_double_from_d_register(vm);
         double dd_value = sqrt(dm_value);
         set_d_register_from_double(vd, dd_value);
-      } else if (instr->Opc3Field() == 0x0) {
+      } else if (instr->Opc3Value() == 0x0) {
         // vmov immediate.
-        if (instr->SzField() == 0x1) {
+        if (instr->SzValue() == 0x1) {
           set_d_register_from_double(vd, instr->DoubleImmedVmov());
         } else {
           UNREACHABLE();  // Not used by v8.
@@ -2480,12 +2470,12 @@
       } else {
         UNREACHABLE();  // Not used by V8.
       }
-    } else if (instr->Opc1Field() == 0x3) {
-      if (instr->SzField() != 0x1) {
+    } else if (instr->Opc1Value() == 0x3) {
+      if (instr->SzValue() != 0x1) {
         UNREACHABLE();  // Not used by V8.
       }
 
-      if (instr->Opc3Field() & 0x1) {
+      if (instr->Opc3Value() & 0x1) {
         // vsub
         double dn_value = get_double_from_d_register(vn);
         double dm_value = get_double_from_d_register(vm);
@@ -2498,9 +2488,9 @@
         double dd_value = dn_value + dm_value;
         set_d_register_from_double(vd, dd_value);
       }
-    } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
+    } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
       // vmul
-      if (instr->SzField() != 0x1) {
+      if (instr->SzValue() != 0x1) {
         UNREACHABLE();  // Not used by V8.
       }
 
@@ -2508,9 +2498,9 @@
       double dm_value = get_double_from_d_register(vm);
       double dd_value = dn_value * dm_value;
       set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
+    } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
       // vdiv
-      if (instr->SzField() != 0x1) {
+      if (instr->SzValue() != 0x1) {
         UNREACHABLE();  // Not used by V8.
       }
 
@@ -2522,15 +2512,15 @@
       UNIMPLEMENTED();  // Not used by V8.
     }
   } else {
-    if ((instr->VCField() == 0x0) &&
-        (instr->VAField() == 0x0)) {
+    if ((instr->VCValue() == 0x0) &&
+        (instr->VAValue() == 0x0)) {
       DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
-    } else if ((instr->VLField() == 0x1) &&
-               (instr->VCField() == 0x0) &&
-               (instr->VAField() == 0x7) &&
+    } else if ((instr->VLValue() == 0x1) &&
+               (instr->VCValue() == 0x0) &&
+               (instr->VAValue() == 0x7) &&
                (instr->Bits(19, 16) == 0x1)) {
       // vmrs
-      uint32_t rt = instr->RtField();
+      uint32_t rt = instr->RtValue();
       if (rt == 0xF) {
         Copy_FPSCR_to_APSR();
       } else {
@@ -2547,12 +2537,12 @@
                          (FPSCR_rounding_mode_ << 22);
         set_register(rt, fpscr);
       }
-    } else if ((instr->VLField() == 0x0) &&
-               (instr->VCField() == 0x0) &&
-               (instr->VAField() == 0x7) &&
+    } else if ((instr->VLValue() == 0x0) &&
+               (instr->VCValue() == 0x0) &&
+               (instr->VAValue() == 0x7) &&
                (instr->Bits(19, 16) == 0x1)) {
       // vmsr
-      uint32_t rt = instr->RtField();
+      uint32_t rt = instr->RtValue();
       if (rt == pc) {
         UNREACHABLE();
       } else {
@@ -2576,13 +2566,14 @@
 }
 
 
-void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
-  ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
-         (instr->VAField() == 0x0));
+void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
+    Instruction* instr) {
+  ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+         (instr->VAValue() == 0x0));
 
-  int t = instr->RtField();
-  int n = instr->VFPNRegCode(kSinglePrecision);
-  bool to_arm_register = (instr->VLField() == 0x1);
+  int t = instr->RtValue();
+  int n = instr->VFPNRegValue(kSinglePrecision);
+  bool to_arm_register = (instr->VLValue() == 0x1);
 
   if (to_arm_register) {
     int32_t int_value = get_sinteger_from_s_register(n);
@@ -2594,27 +2585,27 @@
 }
 
 
-void Simulator::DecodeVCMP(Instr* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
-  ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
-         (instr->Opc3Field() & 0x1));
+void Simulator::DecodeVCMP(Instruction* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+         (instr->Opc3Value() & 0x1));
   // Comparison.
 
   VFPRegPrecision precision = kSinglePrecision;
-  if (instr->SzField() == 1) {
+  if (instr->SzValue() == 1) {
     precision = kDoublePrecision;
   }
 
-  int d = instr->VFPDRegCode(precision);
+  int d = instr->VFPDRegValue(precision);
   int m = 0;
-  if (instr->Opc2Field() == 0x4) {
-    m = instr->VFPMRegCode(precision);
+  if (instr->Opc2Value() == 0x4) {
+    m = instr->VFPMRegValue(precision);
   }
 
   if (precision == kDoublePrecision) {
     double dd_value = get_double_from_d_register(d);
     double dm_value = 0.0;
-    if (instr->Opc2Field() == 0x4) {
+    if (instr->Opc2Value() == 0x4) {
       dm_value = get_double_from_d_register(m);
     }
 
@@ -2632,19 +2623,19 @@
 }
 
 
-void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
-  ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
 
   VFPRegPrecision dst_precision = kDoublePrecision;
   VFPRegPrecision src_precision = kSinglePrecision;
-  if (instr->SzField() == 1) {
+  if (instr->SzValue() == 1) {
     dst_precision = kSinglePrecision;
     src_precision = kDoublePrecision;
   }
 
-  int dst = instr->VFPDRegCode(dst_precision);
-  int src = instr->VFPMRegCode(src_precision);
+  int dst = instr->VFPDRegValue(dst_precision);
+  int src = instr->VFPMRegValue(src_precision);
 
   if (dst_precision == kSinglePrecision) {
     double val = get_double_from_d_register(src);
@@ -2656,16 +2647,16 @@
 }
 
 
-void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
-  ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
-         (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+         (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
 
   // Conversion between floating-point and integer.
   bool to_integer = (instr->Bit(18) == 1);
 
   VFPRegPrecision src_precision = kSinglePrecision;
-  if (instr->SzField() == 1) {
+  if (instr->SzValue() == 1) {
     src_precision = kDoublePrecision;
   }
 
@@ -2682,8 +2673,8 @@
       mode = RZ;
     }
 
-    int dst = instr->VFPDRegCode(kSinglePrecision);
-    int src = instr->VFPMRegCode(src_precision);
+    int dst = instr->VFPDRegValue(kSinglePrecision);
+    int src = instr->VFPMRegValue(src_precision);
     int32_t kMaxInt = v8::internal::kMaxInt;
     int32_t kMinInt = v8::internal::kMinInt;
     switch (mode) {
@@ -2739,8 +2730,8 @@
   } else {
     bool unsigned_integer = (instr->Bit(7) == 0);
 
-    int dst = instr->VFPDRegCode(src_precision);
-    int src = instr->VFPMRegCode(kSinglePrecision);
+    int dst = instr->VFPDRegValue(src_precision);
+    int src = instr->VFPMRegValue(kSinglePrecision);
 
     int val = get_sinteger_from_s_register(src);
 
@@ -2763,24 +2754,24 @@
 }
 
 
-// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
+// void Simulator::DecodeType6CoprocessorIns(Instruction* instr)
 // Decode Type 6 coprocessor instructions.
 // Dm = vmov(Rt, Rt2)
 // <Rt, Rt2> = vmov(Dm)
 // Ddst = MEM(Rbase + 4*offset).
 // MEM(Rbase + 4*offset) = Dsrc.
-void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
-  ASSERT((instr->TypeField() == 6));
+void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
+  ASSERT((instr->TypeValue() == 6));
 
-  if (instr->CoprocessorField() == 0xA) {
-    switch (instr->OpcodeField()) {
+  if (instr->CoprocessorValue() == 0xA) {
+    switch (instr->OpcodeValue()) {
       case 0x8:
       case 0xA:
       case 0xC:
       case 0xE: {  // Load and store single precision float to memory.
-        int rn = instr->RnField();
-        int vd = instr->VFPDRegCode(kSinglePrecision);
-        int offset = instr->Immed8Field();
+        int rn = instr->RnValue();
+        int vd = instr->VFPDRegValue(kSinglePrecision);
+        int offset = instr->Immed8Value();
         if (!instr->HasU()) {
           offset = -offset;
         }
@@ -2799,16 +2790,16 @@
         UNIMPLEMENTED();  // Not used by V8.
         break;
     }
-  } else if (instr->CoprocessorField() == 0xB) {
-    switch (instr->OpcodeField()) {
+  } else if (instr->CoprocessorValue() == 0xB) {
+    switch (instr->OpcodeValue()) {
       case 0x2:
         // Load and store double to two GP registers
         if (instr->Bits(7, 4) != 0x1) {
           UNIMPLEMENTED();  // Not used by V8.
         } else {
-          int rt = instr->RtField();
-          int rn = instr->RnField();
-          int vm = instr->VmField();
+          int rt = instr->RtValue();
+          int rn = instr->RnValue();
+          int vm = instr->VmValue();
           if (instr->HasL()) {
             int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
             int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
@@ -2826,9 +2817,9 @@
         break;
       case 0x8:
       case 0xC: {  // Load and store double to memory.
-        int rn = instr->RnField();
-        int vd = instr->VdField();
-        int offset = instr->Immed8Field();
+        int rn = instr->RnValue();
+        int vd = instr->VdValue();
+        int offset = instr->Immed8Value();
         if (!instr->HasU()) {
           offset = -offset;
         }
@@ -2855,7 +2846,7 @@
 
 
 // Executes the current instruction.
-void Simulator::InstructionDecode(Instr* instr) {
+void Simulator::InstructionDecode(Instruction* instr) {
   if (v8::internal::FLAG_check_icache) {
     CheckICache(instr);
   }
@@ -2869,10 +2860,10 @@
                            reinterpret_cast<byte*>(instr));
     PrintF("  0x%08x  %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
   }
-  if (instr->ConditionField() == special_condition) {
+  if (instr->ConditionField() == kSpecialCondition) {
     UNIMPLEMENTED();
   } else if (ConditionallyExecute(instr)) {
-    switch (instr->TypeField()) {
+    switch (instr->TypeValue()) {
       case 0:
       case 1: {
         DecodeType01(instr);
@@ -2910,10 +2901,11 @@
   // If the instruction is a non taken conditional stop, we need to skip the
   // inlined message address.
   } else if (instr->IsStop()) {
-    set_pc(get_pc() + 2 * Instr::kInstrSize);
+    set_pc(get_pc() + 2 * Instruction::kInstrSize);
   }
   if (!pc_modified_) {
-    set_register(pc, reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+    set_register(pc, reinterpret_cast<int32_t>(instr)
+                         + Instruction::kInstrSize);
   }
 }
 
@@ -2927,7 +2919,7 @@
     // Fast version of the dispatch loop without checking whether the simulator
     // should be stopping at a particular executed instruction.
     while (program_counter != end_sim_pc) {
-      Instr* instr = reinterpret_cast<Instr*>(program_counter);
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
       icount_++;
       InstructionDecode(instr);
       program_counter = get_pc();
@@ -2936,7 +2928,7 @@
     // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
     // we reach the particular instuction count.
     while (program_counter != end_sim_pc) {
-      Instr* instr = reinterpret_cast<Instr*>(program_counter);
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
       icount_++;
       if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
         Debugger dbg(this);
@@ -3057,7 +3049,7 @@
   return address;
 }
 
-} }  // namespace assembler::arm
+} }  // namespace v8::internal
 
 #endif  // USE_SIMULATOR
 
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 7bfe76a..be44766 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -80,8 +80,8 @@
 #include "constants-arm.h"
 #include "hashmap.h"
 
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
 
 class CachePage {
  public:
@@ -203,11 +203,11 @@
   };
 
   // Unsupported instructions use Format to print an error and stop execution.
-  void Format(Instr* instr, const char* format);
+  void Format(Instruction* instr, const char* format);
 
   // Checks if the current instruction should be executed based on its
   // condition bits.
-  bool ConditionallyExecute(Instr* instr);
+  bool ConditionallyExecute(Instruction* instr);
 
   // Helper functions to set the conditional flags in the architecture state.
   void SetNZFlags(int32_t val);
@@ -225,13 +225,13 @@
   void Copy_FPSCR_to_APSR();
 
   // Helper functions to decode common "addressing" modes
-  int32_t GetShiftRm(Instr* instr, bool* carry_out);
-  int32_t GetImm(Instr* instr, bool* carry_out);
-  void HandleRList(Instr* instr, bool load);
-  void SoftwareInterrupt(Instr* instr);
+  int32_t GetShiftRm(Instruction* instr, bool* carry_out);
+  int32_t GetImm(Instruction* instr, bool* carry_out);
+  void HandleRList(Instruction* instr, bool load);
+  void SoftwareInterrupt(Instruction* instr);
 
   // Stop helper functions.
-  inline bool isStopInstruction(Instr* instr);
+  inline bool isStopInstruction(Instruction* instr);
   inline bool isWatchedStop(uint32_t bkpt_code);
   inline bool isEnabledStop(uint32_t bkpt_code);
   inline void EnableStop(uint32_t bkpt_code);
@@ -245,41 +245,42 @@
   inline void WriteB(int32_t addr, uint8_t value);
   inline void WriteB(int32_t addr, int8_t value);
 
-  inline uint16_t ReadHU(int32_t addr, Instr* instr);
-  inline int16_t ReadH(int32_t addr, Instr* instr);
+  inline uint16_t ReadHU(int32_t addr, Instruction* instr);
+  inline int16_t ReadH(int32_t addr, Instruction* instr);
   // Note: Overloaded on the sign of the value.
-  inline void WriteH(int32_t addr, uint16_t value, Instr* instr);
-  inline void WriteH(int32_t addr, int16_t value, Instr* instr);
+  inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
+  inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
 
-  inline int ReadW(int32_t addr, Instr* instr);
-  inline void WriteW(int32_t addr, int value, Instr* instr);
+  inline int ReadW(int32_t addr, Instruction* instr);
+  inline void WriteW(int32_t addr, int value, Instruction* instr);
 
   int32_t* ReadDW(int32_t addr);
   void WriteDW(int32_t addr, int32_t value1, int32_t value2);
 
   // Executing is handled based on the instruction type.
-  void DecodeType01(Instr* instr);  // both type 0 and type 1 rolled into one
-  void DecodeType2(Instr* instr);
-  void DecodeType3(Instr* instr);
-  void DecodeType4(Instr* instr);
-  void DecodeType5(Instr* instr);
-  void DecodeType6(Instr* instr);
-  void DecodeType7(Instr* instr);
+  // Both type 0 and type 1 rolled into one.
+  void DecodeType01(Instruction* instr);
+  void DecodeType2(Instruction* instr);
+  void DecodeType3(Instruction* instr);
+  void DecodeType4(Instruction* instr);
+  void DecodeType5(Instruction* instr);
+  void DecodeType6(Instruction* instr);
+  void DecodeType7(Instruction* instr);
 
   // Support for VFP.
-  void DecodeTypeVFP(Instr* instr);
-  void DecodeType6CoprocessorIns(Instr* instr);
+  void DecodeTypeVFP(Instruction* instr);
+  void DecodeType6CoprocessorIns(Instruction* instr);
 
-  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
-  void DecodeVCMP(Instr* instr);
-  void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
-  void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
+  void DecodeVCMP(Instruction* instr);
+  void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+  void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
 
   // Executes one instruction.
-  void InstructionDecode(Instr* instr);
+  void InstructionDecode(Instruction* instr);
 
   // ICache.
-  static void CheckICache(Instr* instr);
+  static void CheckICache(Instruction* instr);
   static void FlushOnePage(intptr_t start, int size);
   static CachePage* GetCachePage(void* page);
 
@@ -330,8 +331,8 @@
   static v8::internal::HashMap* i_cache_;
 
   // Registered breakpoints.
-  Instr* break_pc_;
-  instr_t break_instr_;
+  Instruction* break_pc_;
+  Instr break_instr_;
 
   // A stop is watched if its code is less than kNumOfWatchedStops.
   // Only watched stops support enabling/disabling and the counter feature.
@@ -344,27 +345,22 @@
   // instruction, if bit 31 of watched_stops[code].count is unset.
   // The value watched_stops[code].count & ~(1 << 31) indicates how many times
   // the breakpoint was hit or gone through.
-  struct StopCoundAndDesc {
+  struct StopCountAndDesc {
     uint32_t count;
     char* desc;
   };
-  StopCoundAndDesc watched_stops[kNumOfWatchedStops];
+  StopCountAndDesc watched_stops[kNumOfWatchedStops];
 };
 
-} }  // namespace assembler::arm
-
-
-namespace v8 {
-namespace internal {
 
 // When running with the simulator transition into simulated execution at this
 // point.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-  reinterpret_cast<Object*>(assembler::arm::Simulator::current()->Call( \
+  reinterpret_cast<Object*>(Simulator::current()->Call( \
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
 
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
-  assembler::arm::Simulator::current()->Call( \
+  Simulator::current()->Call( \
       FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@@ -380,16 +376,16 @@
 class SimulatorStack : public v8::internal::AllStatic {
  public:
   static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
-    return assembler::arm::Simulator::current()->StackLimit();
+    return Simulator::current()->StackLimit();
   }
 
   static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
-    assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
+    Simulator* sim = Simulator::current();
     return sim->PushAddress(try_catch_address);
   }
 
   static inline void UnregisterCTryCatch() {
-    assembler::arm::Simulator::current()->PopAddress();
+    Simulator::current()->PopAddress();
   }
 };
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index ce1d854..1e99e60 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -370,27 +370,31 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss) {
+                                            Label* miss,
+                                            bool support_wrappers) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch1 register.
-  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+                      support_wrappers ? &check_wrapper : miss);
 
   // Load length directly from the string.
   __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
   __ Ret();
 
-  // Check if the object is a JSValue wrapper.
-  __ bind(&check_wrapper);
-  __ cmp(scratch1, Operand(JS_VALUE_TYPE));
-  __ b(ne, miss);
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+    __ b(ne, miss);
 
-  // Unwrap the value and check if the wrapped value is a string.
-  __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
-  __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
-  __ Ret();
+    // Unwrap the value and check if the wrapped value is a string.
+    __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+    __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+    __ Ret();
+  }
 }
 
 
@@ -521,7 +525,7 @@
   // -----------------------------------
 
   // Check that the function really is a function.
-  __ BranchOnSmi(r1, miss);
+  __ JumpIfSmi(r1, miss);
   __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
   __ b(ne, miss);
 
@@ -660,7 +664,7 @@
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
-    __ BranchOnSmi(receiver, miss);
+    __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
 
@@ -1194,17 +1198,16 @@
 }
 
 
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
-                                        JSObject* holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        AccessorInfo* callback,
-                                        String* name,
-                                        Label* miss,
-                                        Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                JSObject* holder,
+                                                Register receiver,
+                                                Register name_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                AccessorInfo* callback,
+                                                String* name,
+                                                Label* miss) {
   // Check that the receiver isn't a smi.
   __ tst(receiver, Operand(kSmiTagMask));
   __ b(eq, miss);
@@ -1225,7 +1228,7 @@
       ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
   __ TailCallExternalReference(load_callback_property, 5, 1);
 
-  return true;
+  return Heap::undefined_value();  // Success.
 }
 
 
@@ -1243,7 +1246,7 @@
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
 
   // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, miss);
+  __ JumpIfSmi(receiver, miss);
 
   // So far the most popular follow ups for interceptor loads are FIELD
   // and CALLBACKS, so inline only them, other cases may be added
@@ -1511,7 +1514,7 @@
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, &miss);
+  __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
   CheckPrototypes(JSObject::cast(object), receiver,
@@ -1565,7 +1568,7 @@
       __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
 
       // Check for a smi.
-      __ BranchOnNotSmi(r4, &with_write_barrier);
+      __ JumpIfNotSmi(r4, &with_write_barrier);
       __ bind(&exit);
       __ Drop(argc + 1);
       __ Ret();
@@ -1672,7 +1675,7 @@
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, &miss);
+  __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
   CheckPrototypes(JSObject::cast(object),
@@ -2009,7 +2012,7 @@
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
-    __ BranchOnSmi(r1, &miss);
+    __ JumpIfSmi(r1, &miss);
 
     CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
                     &miss);
@@ -2168,7 +2171,7 @@
   // Check if the argument is a smi.
   Label not_smi;
   STATIC_ASSERT(kSmiTag == 0);
-  __ BranchOnNotSmi(r0, &not_smi);
+  __ JumpIfNotSmi(r0, &not_smi);
 
   // Do bitwise not or do nothing depending on the sign of the
   // argument.
@@ -2646,9 +2649,18 @@
   __ cmp(r3, Operand(Handle<Map>(object->map())));
   __ b(ne, &miss);
 
+  // Check that the value in the cell is not the hole. If it is, this
+  // cell could have been deleted and reintroducing the global needs
+  // to update the property details in the property dictionary of the
+  // global object. We bail out to the runtime system to do that.
+  __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+  __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
+  __ cmp(r5, r6);
+  __ b(eq, &miss);
+
   // Store the value in the cell.
-  __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
-  __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+  __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
 
   __ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
   __ Ret();
@@ -2738,12 +2750,11 @@
   // -----------------------------------
   Label miss;
 
-  Failure* failure = Failure::InternalError();
-  bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
-                                      callback, name, &miss, &failure);
-  if (!success) {
+  MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
+                                             callback, name, &miss);
+  if (result->IsFailure()) {
     miss.Unuse();
-    return failure;
+    return result;
   }
 
   __ bind(&miss);
@@ -2890,12 +2901,11 @@
   __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  Failure* failure = Failure::InternalError();
-  bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4,
-                                      callback, name, &miss, &failure);
-  if (!success) {
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
+                                             r4, callback, name, &miss);
+  if (result->IsFailure()) {
     miss.Unuse();
-    return failure;
+    return result;
   }
 
   __ bind(&miss);
@@ -2995,7 +3005,7 @@
   __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadStringLength(masm(), r1, r2, r3, &miss);
+  GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
 
@@ -3361,10 +3371,10 @@
   Register receiver = r1;
 
   // Check that the object isn't a smi
-  __ BranchOnSmi(receiver, &slow);
+  __ JumpIfSmi(receiver, &slow);
 
   // Check that the key is a smi.
-  __ BranchOnNotSmi(key, &slow);
+  __ JumpIfNotSmi(key, &slow);
 
   // Check that the object is a JS object. Load map into r2.
   __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
@@ -3645,7 +3655,7 @@
   // r3 mostly holds the elements array or the destination external array.
 
   // Check that the object isn't a smi.
-  __ BranchOnSmi(receiver, &slow);
+  __ JumpIfSmi(receiver, &slow);
 
   // Check that the object is a JS object. Load map into r3.
   __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
@@ -3658,7 +3668,7 @@
   __ b(ne, &slow);
 
   // Check that the key is a smi.
-  __ BranchOnNotSmi(key, &slow);
+  __ JumpIfNotSmi(key, &slow);
 
   // Check that the elements array is the appropriate type of ExternalArray.
   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -3678,7 +3688,7 @@
   // runtime for all other kinds of values.
   // r3: external array.
   // r4: key (integer).
-  __ BranchOnNotSmi(value, &check_heap_number);
+  __ JumpIfNotSmi(value, &check_heap_number);
   __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
   __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
 
diff --git a/src/assembler.h b/src/assembler.h
index 4ef61e4..a29aa06 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -185,7 +185,6 @@
     DEBUG_BREAK,  // Code target for the debugger statement.
     CODE_TARGET,  // Code target which is not any of the above.
     EMBEDDED_OBJECT,
-
     GLOBAL_PROPERTY_CELL,
 
     // Everything after runtime_entry (inclusive) is not GC'ed.
@@ -203,7 +202,7 @@
     NUMBER_OF_MODES,  // must be no greater than 14 - see RelocInfoWriter
     NONE,  // never recorded
     LAST_CODE_ENUM = CODE_TARGET,
-    LAST_GCED_ENUM = EMBEDDED_OBJECT
+    LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
   };
 
 
diff --git a/src/ast.cc b/src/ast.cc
index e8b3e03..80927a8 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -521,6 +521,8 @@
   if (key()->IsPropertyName()) {
     if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
       is_array_length_ = true;
+    } else if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_StringLength)) {
+      is_string_length_ = true;
     } else if (oracle->LoadIsBuiltin(this,
                                      Builtins::LoadIC_FunctionPrototype)) {
       is_function_prototype_ = true;
@@ -663,19 +665,11 @@
 }
 
 
-void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-  TypeInfo left = oracle->BinaryType(this, TypeFeedbackOracle::LEFT);
-  TypeInfo right = oracle->BinaryType(this, TypeFeedbackOracle::RIGHT);
-  is_smi_only_ = left.IsSmi() && right.IsSmi();
-}
-
-
 void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-  TypeInfo left = oracle->CompareType(this, TypeFeedbackOracle::LEFT);
-  TypeInfo right = oracle->CompareType(this, TypeFeedbackOracle::RIGHT);
-  if (left.IsSmi() && right.IsSmi()) {
+  TypeInfo info = oracle->CompareType(this);
+  if (info.IsSmi()) {
     compare_type_ = SMI_ONLY;
-  } else if (left.IsNonPrimitive() && right.IsNonPrimitive()) {
+  } else if (info.IsNonPrimitive()) {
     compare_type_ = OBJECT_ONLY;
   } else {
     ASSERT(compare_type_ == NONE);
diff --git a/src/ast.h b/src/ast.h
index a897e88..2aee5d7 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1205,9 +1205,10 @@
         key_(key),
         pos_(pos),
         type_(type),
-        is_monomorphic_(false),
         receiver_types_(NULL),
+        is_monomorphic_(false),
         is_array_length_(false),
+        is_string_length_(false),
         is_function_prototype_(false),
         is_arguments_access_(false) { }
 
@@ -1221,6 +1222,7 @@
   int position() const { return pos_; }
   bool is_synthetic() const { return type_ == SYNTHETIC; }
 
+  bool IsStringLength() const { return is_string_length_; }
   bool IsFunctionPrototype() const { return is_function_prototype_; }
 
   // Marks that this is actually an argument rewritten to a keyed property
@@ -1249,11 +1251,12 @@
   int pos_;
   Type type_;
 
-  bool is_monomorphic_;
   ZoneMapList* receiver_types_;
-  bool is_array_length_;
-  bool is_function_prototype_;
-  bool is_arguments_access_;
+  bool is_monomorphic_ : 1;
+  bool is_array_length_ : 1;
+  bool is_string_length_ : 1;
+  bool is_function_prototype_ : 1;
+  bool is_arguments_access_ : 1;
   Handle<Map> monomorphic_receiver_type_;
 
   // Dummy property used during preparsing.
@@ -1395,7 +1398,7 @@
                   Expression* left,
                   Expression* right,
                   int pos)
-      : op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
+      : op_(op), left_(left), right_(right), pos_(pos) {
     ASSERT(Token::IsBinaryOp(op));
     right_id_ = (op == Token::AND || op == Token::OR)
         ? static_cast<int>(GetNextId())
@@ -1416,10 +1419,6 @@
   Expression* right() const { return right_; }
   int position() const { return pos_; }
 
-  // Type feedback information.
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  bool IsSmiOnly() const { return is_smi_only_; }
-
   // Bailout support.
   int RightId() const { return right_id_; }
 
@@ -1428,7 +1427,6 @@
   Expression* left_;
   Expression* right_;
   int pos_;
-  bool is_smi_only_;
   // The short-circuit logical operations have an AST ID for their
   // right-hand subexpression.
   int right_id_;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index cae1a9a..16a186f 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1805,9 +1805,8 @@
     AddToWeakGlobalContextList(*global_context_);
     Top::set_context(*global_context_);
     i::Counters::contexts_created_by_snapshot.Increment();
-    result_ = global_context_;
     JSFunction* empty_function =
-        JSFunction::cast(result_->function_map()->prototype());
+        JSFunction::cast(global_context_->function_map()->prototype());
     empty_function_ = Handle<JSFunction>(empty_function);
     Handle<GlobalObject> inner_global;
     Handle<JSGlobalProxy> global_proxy =
diff --git a/src/builtins.cc b/src/builtins.cc
index 7c2c2bc..58dd439 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1228,7 +1228,12 @@
 
 
 static void Generate_LoadIC_StringLength(MacroAssembler* masm) {
-  LoadIC::GenerateStringLength(masm);
+  LoadIC::GenerateStringLength(masm, false);
+}
+
+
+static void Generate_LoadIC_StringWrapperLength(MacroAssembler* masm) {
+  LoadIC::GenerateStringLength(masm, true);
 }
 
 
diff --git a/src/builtins.h b/src/builtins.h
index 39f3546..88d31c7 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -86,6 +86,7 @@
   V(LoadIC_Normal,              LOAD_IC, MONOMORPHIC)                     \
   V(LoadIC_ArrayLength,         LOAD_IC, MONOMORPHIC)                     \
   V(LoadIC_StringLength,        LOAD_IC, MONOMORPHIC)                     \
+  V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC)                     \
   V(LoadIC_FunctionPrototype,   LOAD_IC, MONOMORPHIC)                     \
   V(LoadIC_Megamorphic,         LOAD_IC, MEGAMORPHIC)                     \
                                                                           \
diff --git a/src/frames.cc b/src/frames.cc
index 7f28ff1..24ea8dd 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -695,7 +695,7 @@
   ASSERT(frames->length() == 0);
   ASSERT(is_optimized());
 
-  int deopt_index = AstNode::kNoNumber;
+  int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
 
   // BUG(3243555): Since we don't have a lazy-deopt registered at
@@ -793,7 +793,7 @@
 
   SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
   *deopt_index = safepoint_entry.deoptimization_index();
-  ASSERT(*deopt_index != AstNode::kNoNumber);
+  ASSERT(*deopt_index != Safepoint::kNoDeoptimizationIndex);
 
   return DeoptimizationInputData::cast(code->deoptimization_data());
 }
@@ -803,7 +803,7 @@
   ASSERT(functions->length() == 0);
   ASSERT(is_optimized());
 
-  int deopt_index = AstNode::kNoNumber;
+  int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
 
   TranslationIterator it(data->TranslationByteArray(),
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index d1a4782..2f8b6e2 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -490,7 +490,7 @@
 
 
 #ifdef DEBUG
-void HInstruction::Verify() const {
+void HInstruction::Verify() {
   // Verify that input operands are defined before use.
   HBasicBlock* cur_block = block();
   for (int i = 0; i < OperandCount(); ++i) {
@@ -517,6 +517,11 @@
   if (HasSideEffects() && !IsOsrEntry()) {
     ASSERT(next()->IsSimulate());
   }
+
+  // Verify that instructions that can be eliminated by GVN have overridden
+  // HValue::DataEquals.  The default implementation is UNREACHABLE.  We
+  // don't actually care whether DataEquals returns true or false here.
+  if (CheckFlag(kUseGVN)) DataEquals(this);
 }
 #endif
 
@@ -524,7 +529,7 @@
 HCall::HCall(int count) : arguments_(Zone::NewArray<HValue*>(count), count) {
   for (int i = 0; i < count; ++i) arguments_[i] = NULL;
   set_representation(Representation::Tagged());
-  SetFlagMask(AllSideEffects());
+  SetAllSideEffects();
 }
 
 
@@ -1119,10 +1124,10 @@
 void HCompare::SetInputRepresentation(Representation r) {
   input_representation_ = r;
   if (r.IsTagged()) {
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
     ClearFlag(kUseGVN);
   } else {
-    ClearFlagMask(AllSideEffects());
+    ClearAllSideEffects();
     SetFlag(kUseGVN);
   }
 }
@@ -1388,7 +1393,7 @@
 // Node-specific verification code is only included in debug mode.
 #ifdef DEBUG
 
-void HPhi::Verify() const {
+void HPhi::Verify() {
   ASSERT(OperandCount() == block()->predecessors()->length());
   for (int i = 0; i < OperandCount(); ++i) {
     HValue* value = OperandAt(i);
@@ -1400,49 +1405,49 @@
 }
 
 
-void HSimulate::Verify() const {
+void HSimulate::Verify() {
   HInstruction::Verify();
   ASSERT(HasAstId());
 }
 
 
-void HBoundsCheck::Verify() const {
+void HBoundsCheck::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
 
-void HCheckSmi::Verify() const {
+void HCheckSmi::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
 
-void HCheckNonSmi::Verify() const {
+void HCheckNonSmi::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
 
-void HCheckInstanceType::Verify() const {
+void HCheckInstanceType::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
 
-void HCheckMap::Verify() const {
+void HCheckMap::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
 
-void HCheckFunction::Verify() const {
+void HCheckFunction::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
 
-void HCheckPrototypeMaps::Verify() const {
+void HCheckPrototypeMaps::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index d57655a..ff81700 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -46,112 +46,6 @@
 class LChunkBuilder;
 
 
-// Type hierarchy:
-//
-// HValue
-//   HInstruction
-//     HAccessArgumentsAt
-//     HApplyArguments
-//     HArgumentsElements
-//     HArgumentsLength
-//     HArgumentsObject
-//     HBinaryOperation
-//       HArithmeticBinaryOperation
-//         HAdd
-//         HDiv
-//         HMod
-//         HMul
-//         HSub
-//       HBitwiseBinaryOperation
-//         HBitAnd
-//         HBitOr
-//         HBitXor
-//         HSar
-//         HShl
-//         HShr
-//       HBoundsCheck
-//       HCompare
-//       HCompareJSObjectEq
-//       HInstanceOf
-//       HInstanceOfKnownGlobal
-//       HLoadKeyed
-//         HLoadKeyedFastElement
-//         HLoadKeyedGeneric
-//       HPower
-//       HStoreNamed
-//         HStoreNamedField
-//         HStoreNamedGeneric
-//       HStringCharCodeAt
-//     HBlockEntry
-//     HCall
-//       HCallConstantFunction
-//       HCallFunction
-//       HCallGlobal
-//       HCallKeyed
-//       HCallKnownGlobal
-//       HCallNamed
-//       HCallNew
-//       HCallRuntime
-//     HCallStub
-//     HCheckPrototypeMaps
-//     HConstant
-//     HControlInstruction
-//       HDeoptimize
-//       HGoto
-//       HUnaryControlInstruction
-//         HCompareMap
-//         HReturn
-//         HTest
-//         HThrow
-//     HEnterInlined
-//     HFunctionLiteral
-//     HGlobalObject
-//     HGlobalReceiver
-//     HLeaveInlined
-//     HLoadContextSlot
-//     HLoadGlobal
-//     HMaterializedLiteral
-//       HArrayLiteral
-//       HObjectLiteral
-//       HRegExpLiteral
-//     HOsrEntry
-//     HParameter
-//     HSimulate
-//     HStackCheck
-//     HStoreKeyed
-//       HStoreKeyedFastElement
-//       HStoreKeyedGeneric
-//     HUnaryOperation
-//       HBitNot
-//       HChange
-//       HCheckFunction
-//       HCheckInstanceType
-//       HCheckMap
-//       HCheckNonSmi
-//       HCheckSmi
-//       HDeleteProperty
-//       HFixedArrayLength
-//       HJSArrayLength
-//       HLoadElements
-//         HTypeofIs
-//       HLoadNamedField
-//       HLoadNamedGeneric
-//       HLoadFunctionPrototype
-//       HPushArgument
-//       HStringLength
-//       HTypeof
-//       HUnaryMathOperation
-//       HUnaryPredicate
-//         HClassOfTest
-//         HHasCachedArrayIndex
-//         HHasInstanceType
-//         HIsNull
-//         HIsObject
-//         HIsSmi
-//       HValueOf
-//     HUnknownOSRValue
-//   HPhi
-
 #define HYDROGEN_ALL_INSTRUCTION_LIST(V)       \
   V(ArithmeticBinaryOperation)                 \
   V(BinaryOperation)                           \
@@ -224,12 +118,12 @@
   V(LeaveInlined)                              \
   V(LoadContextSlot)                           \
   V(LoadElements)                              \
+  V(LoadFunctionPrototype)                     \
   V(LoadGlobal)                                \
   V(LoadKeyedFastElement)                      \
   V(LoadKeyedGeneric)                          \
   V(LoadNamedField)                            \
   V(LoadNamedGeneric)                          \
-  V(LoadFunctionPrototype)                     \
   V(Mod)                                       \
   V(Mul)                                       \
   V(ObjectLiteral)                             \
@@ -268,7 +162,6 @@
   V(GlobalVars)                                \
   V(Maps)                                      \
   V(ArrayLengths)                              \
-  V(FunctionPrototypes)                        \
   V(OsrEntries)
 
 #define DECLARE_INSTRUCTION(type)                   \
@@ -573,11 +466,6 @@
     return flags << kChangesToDependsFlagsLeftShift;
   }
 
-  // A flag mask to mark an instruction as having arbitrary side effects.
-  static int AllSideEffects() {
-    return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
-  }
-
   static HValue* cast(HValue* value) { return value; }
 
   enum Opcode {
@@ -636,9 +524,6 @@
     return NULL;
   }
 
-  bool HasSideEffects() const {
-    return (flags_ & AllSideEffects()) != 0;
-  }
   bool IsDefinedAfter(HBasicBlock* other) const;
 
   // Operands.
@@ -661,12 +546,13 @@
   void Delete();
 
   int flags() const { return flags_; }
-  void SetFlagMask(int mask) { flags_ |= mask; }
-  void SetFlag(Flag f) { SetFlagMask(1 << f); }
-  void ClearFlagMask(int mask) { flags_ &= ~mask; }
-  void ClearFlag(Flag f) { ClearFlagMask(1 << f); }
-  bool CheckFlag(Flag f) const { return CheckFlagMask(1 << f); }
-  bool CheckFlagMask(int mask) const { return (flags_ & mask) != 0; }
+  void SetFlag(Flag f) { flags_ |= (1 << f); }
+  void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
+  bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
+
+  void SetAllSideEffects() { flags_ |= AllSideEffects(); }
+  void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
+  bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
 
   Range* range() const { return range_; }
   bool HasRange() const { return range_ != NULL; }
@@ -714,11 +600,16 @@
   void InsertInputConversion(HInstruction* previous, int index, HType type);
 
 #ifdef DEBUG
-  virtual void Verify() const = 0;
+  virtual void Verify() = 0;
 #endif
 
  protected:
-  virtual bool DataEquals(HValue* other) const { return true; }
+  // This function must be overridden for instructions with flag kUseGVN, to
+  // compare the non-Operand parts of the instruction.
+  virtual bool DataEquals(HValue* other) const {
+    UNREACHABLE();
+    return false;
+  }
   virtual void RepresentationChanged(Representation to) { }
   virtual Range* InferRange();
   virtual void DeleteFromGraph() = 0;
@@ -735,6 +626,11 @@
   }
 
  private:
+  // A flag mask to mark an instruction as having arbitrary side effects.
+  static int AllSideEffects() {
+    return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
+  }
+
   void InternalReplaceAtUse(HValue* use, HValue* other);
   void RegisterUse(int index, HValue* new_value);
 
@@ -774,7 +670,7 @@
   virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   // Returns whether this is some kind of deoptimizing check
@@ -1063,7 +959,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
  protected:
@@ -1159,6 +1055,9 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1171,6 +1070,9 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1361,6 +1263,9 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1377,6 +1282,9 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1394,6 +1302,9 @@
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1489,6 +1400,9 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1510,7 +1424,7 @@
   virtual HType CalculateInferredType() const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   Handle<Map> map() const { return map_; }
@@ -1545,7 +1459,7 @@
   virtual HType CalculateInferredType() const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   Handle<JSFunction> target() const { return target_; }
@@ -1587,7 +1501,7 @@
   }
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
@@ -1628,10 +1542,13 @@
   virtual HType CalculateInferredType() const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1646,7 +1563,7 @@
   virtual bool IsCheckInstruction() const { return true; }
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   Handle<JSObject> prototype() const { return prototype_; }
@@ -1689,10 +1606,13 @@
   virtual HType CalculateInferredType() const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1745,7 +1665,7 @@
   virtual void PrintTo(StringStream* stream) const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   DECLARE_INSTRUCTION(Phi)
@@ -1833,7 +1753,7 @@
   }
 
 #ifdef DEBUG
-  virtual void Verify() const { }
+  virtual void Verify() { }
 #endif
 
   DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
@@ -1952,6 +1872,9 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1963,6 +1886,9 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -1999,6 +1925,8 @@
     operands_[index] = value;
   }
 
+  virtual bool DataEquals(HValue* other) const { return true; }
+
  private:
   HOperandVector<3> operands_;
 };
@@ -2018,13 +1946,16 @@
   }
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   HValue* index() const { return left(); }
   HValue* length() const { return right(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2034,7 +1965,7 @@
       : HBinaryOperation(left, right) {
     set_representation(Representation::Tagged());
     SetFlag(kFlexibleRepresentation);
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
@@ -2044,7 +1975,7 @@
   virtual void RepresentationChanged(Representation to) {
     if (!to.IsTagged()) {
       ASSERT(to.IsInteger32());
-      ClearFlagMask(AllSideEffects());
+      ClearAllSideEffects();
       SetFlag(kTruncatingToInt32);
       SetFlag(kUseGVN);
     }
@@ -2062,12 +1993,12 @@
       : HBinaryOperation(left, right) {
     set_representation(Representation::Tagged());
     SetFlag(kFlexibleRepresentation);
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   virtual void RepresentationChanged(Representation to) {
     if (!to.IsTagged()) {
-      ClearFlagMask(AllSideEffects());
+      ClearAllSideEffects();
       SetFlag(kUseGVN);
     }
   }
@@ -2093,7 +2024,7 @@
       : HBinaryOperation(left, right), token_(token) {
     ASSERT(Token::IsCompareOp(token));
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   void SetInputRepresentation(Representation r);
@@ -2142,6 +2073,9 @@
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2184,6 +2118,9 @@
   explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2192,6 +2129,9 @@
   explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2228,6 +2168,9 @@
   explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2278,7 +2221,7 @@
  public:
   HInstanceOf(HValue* left, HValue* right) : HBinaryOperation(left, right) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   virtual bool EmitAtUses() const { return uses()->length() <= 1; }
@@ -2296,7 +2239,7 @@
   HInstanceOfKnownGlobal(HValue* left, Handle<JSFunction> right)
       : HUnaryOperation(left), function_(right) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   Handle<JSFunction> function() { return function_; }
@@ -2326,6 +2269,9 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2348,6 +2294,8 @@
   DECLARE_CONCRETE_INSTRUCTION(Add, "add")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
 
@@ -2363,6 +2311,8 @@
   DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
 
@@ -2383,6 +2333,8 @@
   DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
 
@@ -2398,6 +2350,8 @@
   DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
 
@@ -2414,6 +2368,8 @@
   DECLARE_CONCRETE_INSTRUCTION(Div, "div")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
 
@@ -2429,6 +2385,8 @@
   DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
 
@@ -2442,6 +2400,9 @@
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2456,6 +2417,8 @@
   DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
 
@@ -2469,6 +2432,9 @@
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2480,6 +2446,9 @@
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2492,6 +2461,9 @@
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
@@ -2534,7 +2506,7 @@
         argument_count_(argument_count),
         transcendental_type_(TranscendentalCache::kNumberOfCaches) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   CodeStub::Major major_key() { return major_key_; }
@@ -2603,12 +2575,17 @@
 
 class HStoreGlobal: public HUnaryOperation {
  public:
-  HStoreGlobal(HValue* value, Handle<JSGlobalPropertyCell> cell)
-      : HUnaryOperation(value), cell_(cell) {
+  HStoreGlobal(HValue* value,
+               Handle<JSGlobalPropertyCell> cell,
+               bool check_hole_value)
+      : HUnaryOperation(value),
+        cell_(cell),
+        check_hole_value_(check_hole_value) {
     SetFlag(kChangesGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+  bool check_hole_value() const { return check_hole_value_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -2617,14 +2594,9 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global")
 
- protected:
-  virtual bool DataEquals(HValue* other) const {
-    HStoreGlobal* b = HStoreGlobal::cast(other);
-    return cell_.is_identical_to(b->cell());
-  }
-
  private:
   Handle<JSGlobalPropertyCell> cell_;
+  bool check_hole_value_;
 };
 
 
@@ -2704,7 +2676,7 @@
   HLoadNamedGeneric(HValue* object, Handle<Object> name)
       : HUnaryOperation(object), name_(name) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   HValue* object() const { return OperandAt(0); }
@@ -2716,12 +2688,6 @@
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
 
- protected:
-  virtual bool DataEquals(HValue* other) const {
-    HLoadNamedGeneric* b = HLoadNamedGeneric::cast(other);
-    return name_.is_identical_to(b->name_);
-  }
-
  private:
   Handle<Object> name_;
 };
@@ -2732,7 +2698,8 @@
   explicit HLoadFunctionPrototype(HValue* function)
       : HUnaryOperation(function) {
     set_representation(Representation::Tagged());
-    SetFlagMask(kDependsOnFunctionPrototypes);
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnCalls);
   }
 
   HValue* function() const { return OperandAt(0); }
@@ -2781,13 +2748,16 @@
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
                                "load_keyed_fast_element")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
 class HLoadKeyedGeneric: public HLoadKeyed {
  public:
   HLoadKeyedGeneric(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
@@ -2823,12 +2793,6 @@
 
   DECLARE_INSTRUCTION(StoreNamed)
 
- protected:
-  virtual bool DataEquals(HValue* other) const {
-    HStoreNamed* b = HStoreNamed::cast(other);
-    return name_.is_identical_to(b->name_);
-  }
-
  private:
   Handle<Object> name_;
 };
@@ -2874,7 +2838,7 @@
  public:
   HStoreNamedGeneric(HValue* obj, Handle<Object> name, HValue* val)
       : HStoreNamed(obj, name, val) {
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
@@ -2939,7 +2903,7 @@
  public:
   HStoreKeyedGeneric(HValue* obj, HValue* key, HValue* val)
       : HStoreKeyed(obj, key, val) {
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
@@ -2960,14 +2924,14 @@
         : Representation::Tagged();
   }
 
-  virtual bool DataEquals(HValue* other) const { return true; }
-
   HValue* string() const { return OperandAt(0); }
   HValue* index() const { return OperandAt(1); }
 
   DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange() {
     return new Range(0, String::kMaxUC16CharCode);
   }
@@ -2990,11 +2954,11 @@
     return HType::Smi();
   }
 
-  virtual bool DataEquals(HValue* other) const { return true; }
-
   DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange() {
     return new Range(0, String::kMaxLength);
   }
@@ -3128,7 +3092,7 @@
   HDeleteProperty(HValue* obj, HValue* key)
       : HBinaryOperation(obj, key) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index a072f90..8462306 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -3370,9 +3370,10 @@
   LookupGlobalPropertyCell(var, &lookup, true);
   CHECK_BAILOUT;
 
+  bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
   Handle<GlobalObject> global(graph()->info()->global_object());
   Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-  HInstruction* instr = new HStoreGlobal(value, cell);
+  HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
   instr->set_position(position);
   AddInstruction(instr);
   if (instr->HasSideEffects()) AddSimulate(ast_id);
@@ -3389,7 +3390,6 @@
   // We have a second position recorded in the FullCodeGenerator to have
   // type feedback for the binary operation.
   BinaryOperation* operation = expr->binary_operation();
-  operation->RecordTypeFeedback(oracle());
 
   if (var != NULL) {
     if (!var->is_global() && !var->IsStackAllocated()) {
@@ -3770,6 +3770,14 @@
     AddInstruction(new HCheckInstanceType(array, JS_ARRAY_TYPE, JS_ARRAY_TYPE));
     instr = new HJSArrayLength(array);
 
+  } else if (expr->IsStringLength()) {
+    HValue* string = Pop();
+    AddInstruction(new HCheckNonSmi(string));
+    AddInstruction(new HCheckInstanceType(string,
+                                          FIRST_STRING_TYPE,
+                                          LAST_STRING_TYPE));
+    instr = new HStringLength(string);
+
   } else if (expr->IsFunctionPrototype()) {
     HValue* function = Pop();
     AddInstruction(new HCheckNonSmi(function));
@@ -3956,8 +3964,7 @@
   int count_before = AstNode::Count();
 
   // Parse and allocate variables.
-  Handle<SharedFunctionInfo> shared(target->shared());
-  CompilationInfo inner_info(shared);
+  CompilationInfo inner_info(target);
   if (!ParserApi::Parse(&inner_info) ||
       !Scope::Analyze(&inner_info)) {
     return false;
@@ -3980,9 +3987,10 @@
 
   // Don't inline functions that uses the arguments object or that
   // have a mismatching number of parameters.
+  Handle<SharedFunctionInfo> shared(target->shared());
   int arity = expr->arguments()->length();
   if (function->scope()->arguments() != NULL ||
-      arity != target->shared()->formal_parameter_count()) {
+      arity != shared->formal_parameter_count()) {
     return false;
   }
 
@@ -4805,7 +4813,7 @@
     default:
       UNREACHABLE();
   }
-  TypeInfo info = oracle()->BinaryType(expr, TypeFeedbackOracle::RESULT);
+  TypeInfo info = oracle()->BinaryType(expr);
   // If we hit an uninitialized binary op stub we will get type info
   // for a smi operation. If one of the operands is a constant string
   // do not generate code assuming it is a smi operation.
@@ -4956,7 +4964,7 @@
   HValue* left = Pop();
   Token::Value op = expr->op();
 
-  TypeInfo info = oracle()->CompareType(expr, TypeFeedbackOracle::RESULT);
+  TypeInfo info = oracle()->CompareType(expr);
   HInstruction* instr = NULL;
   if (op == Token::INSTANCEOF) {
     // Check to see if the rhs of the instanceof is a global function not
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index c234b36..c136977 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -388,7 +388,8 @@
 }
 
 
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm,
+                                  bool support_wrappers) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -396,7 +397,8 @@
   // -----------------------------------
   Label miss;
 
-  StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss,
+                                         support_wrappers);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 3bfb10f..d35bfc9 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -566,37 +566,40 @@
 }
 
 
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               int deoptimization_index) {
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    int deoptimization_index) {
   const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-                                                    deoptimization_index);
+      kind, arguments, deoptimization_index);
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
       safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
     }
   }
+  if (kind & Safepoint::kWithRegisters) {
+    // Register esi always contains a pointer to the context.
+    safepoint.DefinePointerRegister(esi);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
 }
 
 
 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                             int arguments,
                                             int deoptimization_index) {
-  const ZoneList<LOperand*>* operands = pointers->operands();
-  Safepoint safepoint =
-      safepoints_.DefineSafepointWithRegisters(
-          masm(), arguments, deoptimization_index);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
-    } else if (pointer->IsRegister()) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
-    }
-  }
-  // Register esi always contains a pointer to the context.
-  safepoint.DefinePointerRegister(esi);
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+      deoptimization_index);
 }
 
 
@@ -1908,7 +1911,19 @@
 
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
-  __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
+  Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
+
+  // If the cell we are storing to contains the hole it could have
+  // been deleted from the property dictionary. In that case, we need
+  // to update the property details in the property dictionary to mark
+  // it as no longer deleted. We deoptimize in that case.
+  if (instr->hydrogen()->check_hole_value()) {
+    __ cmp(cell_operand, Factory::the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+
+  // Store the value.
+  __ mov(cell_operand, value);
 }
 
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 780525a..f0379c0 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -198,6 +198,10 @@
   void DoMathSin(LUnaryMathOperation* instr);
 
   // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       int deoptimization_index);
   void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index a5066c6..100a2d4 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1645,7 +1645,8 @@
 
 
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+  LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
+  return instr->check_hole_value() ? AssignEnvironment(result) : result;
 }
 
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 929008f..a4c9b11 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1288,7 +1288,7 @@
   ExternalReference scheduled_exception_address =
       ExternalReference::scheduled_exception_address();
   cmp(Operand::StaticVariable(scheduled_exception_address),
-         Immediate(Factory::the_hole_value()));
+      Immediate(Factory::the_hole_value()));
   j(not_equal, &promote_scheduled_exception, not_taken);
   LeaveApiExitFrame();
   ret(stack_space * kPointerSize);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 6d353c2..6de9a41 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -327,28 +327,32 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss) {
+                                            Label* miss,
+                                            bool support_wrappers) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch register.
-  GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, miss,
+                      support_wrappers ? &check_wrapper : miss);
 
   // Load length from the string and convert to a smi.
   __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
   __ ret(0);
 
-  // Check if the object is a JSValue wrapper.
-  __ bind(&check_wrapper);
-  __ cmp(scratch1, JS_VALUE_TYPE);
-  __ j(not_equal, miss, not_taken);
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ cmp(scratch1, JS_VALUE_TYPE);
+    __ j(not_equal, miss, not_taken);
 
-  // Check if the wrapped value is a string and load the length
-  // directly if it is.
-  __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
-  __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
-  __ ret(0);
+    // Check if the wrapped value is a string and load the length
+    // directly if it is.
+    __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+    __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+    __ ret(0);
+  }
 }
 
 
@@ -451,10 +455,9 @@
 
 
 // Generates call to API function.
-static bool GenerateFastApiCall(MacroAssembler* masm,
-                                const CallOptimization& optimization,
-                                int argc,
-                                Failure** failure) {
+static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
+                                        const CallOptimization& optimization,
+                                        int argc) {
   // ----------- S t a t e -------------
   //  -- esp[0]              : return address
   //  -- esp[4]              : object passing the type check
@@ -516,13 +519,8 @@
   // already generated).  Do not allow the assembler to perform a
   // garbage collection but instead return the allocation failure
   // object.
-  MaybeObject* result =
-      masm->TryCallApiFunctionAndReturn(&fun, argc + kFastApiCallArguments + 1);
-  if (result->IsFailure()) {
-    *failure = Failure::cast(result);
-    return false;
-  }
-  return true;
+  return masm->TryCallApiFunctionAndReturn(&fun,
+                                           argc + kFastApiCallArguments + 1);
 }
 
 
@@ -535,17 +533,16 @@
         arguments_(arguments),
         name_(name) {}
 
-  bool Compile(MacroAssembler* masm,
-               JSObject* object,
-               JSObject* holder,
-               String* name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss,
-               Failure** failure) {
+  MaybeObject* Compile(MacroAssembler* masm,
+                       JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
+                       Register receiver,
+                       Register scratch1,
+                       Register scratch2,
+                       Register scratch3,
+                       Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -566,8 +563,7 @@
                               lookup,
                               name,
                               optimization,
-                              miss,
-                              failure);
+                              miss);
     } else {
       CompileRegular(masm,
                      object,
@@ -578,23 +574,22 @@
                      name,
                      holder,
                      miss);
-      return true;
+      return Heap::undefined_value();  // Success.
     }
   }
 
  private:
-  bool CompileCacheable(MacroAssembler* masm,
-                        JSObject* object,
-                        Register receiver,
-                        Register scratch1,
-                        Register scratch2,
-                        Register scratch3,
-                        JSObject* interceptor_holder,
-                        LookupResult* lookup,
-                        String* name,
-                        const CallOptimization& optimization,
-                        Label* miss_label,
-                        Failure** failure) {
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -656,11 +651,9 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      bool success = GenerateFastApiCall(masm, optimization,
-                                         arguments_.immediate(), failure);
-      if (!success) {
-        return false;
-      }
+      MaybeObject* result =
+          GenerateFastApiCall(masm, optimization, arguments_.immediate());
+      if (result->IsFailure()) return result;
     } else {
       __ InvokeFunction(optimization.constant_function(), arguments_,
                         JUMP_FUNCTION);
@@ -679,7 +672,7 @@
       FreeSpaceForFastApiCall(masm, scratch1);
     }
 
-    return true;
+    return Heap::undefined_value();  // Success.
   }
 
   void CompileRegular(MacroAssembler* masm,
@@ -1057,17 +1050,16 @@
 }
 
 
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
-                                        JSObject* holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        AccessorInfo* callback,
-                                        String* name,
-                                        Label* miss,
-                                        Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                JSObject* holder,
+                                                Register receiver,
+                                                Register name_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                AccessorInfo* callback,
+                                                String* name,
+                                                Label* miss) {
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
   __ j(zero, miss, not_taken);
@@ -1122,13 +1114,7 @@
   // already generated).  Do not allow the assembler to perform a
   // garbage collection but instead return the allocation failure
   // object.
-  MaybeObject* result = masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
-  if (result->IsFailure()) {
-    *failure = Failure::cast(result);
-    return false;
-  }
-
-  return true;
+  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
 }
 
 
@@ -2280,17 +2266,14 @@
   }
 
   if (depth != kInvalidProtoDepth) {
-    Failure* failure;
     // Move the return address on top of the stack.
     __ mov(eax, Operand(esp, 3 * kPointerSize));
     __ mov(Operand(esp, 0 * kPointerSize), eax);
 
     // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
     // duplicate of return address and will be overwritten.
-    bool success = GenerateFastApiCall(masm(), optimization, argc, &failure);
-    if (!success) {
-      return failure;
-    }
+    MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+    if (result->IsFailure()) return result;
   } else {
     __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
   }
@@ -2335,21 +2318,17 @@
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
   CallInterceptorCompiler compiler(this, arguments(), ecx);
-  Failure* failure;
-  bool success = compiler.Compile(masm(),
-                                  object,
-                                  holder,
-                                  name,
-                                  &lookup,
-                                  edx,
-                                  ebx,
-                                  edi,
-                                  eax,
-                                  &miss,
-                                  &failure);
-  if (!success) {
-    return failure;
-  }
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         edx,
+                                         ebx,
+                                         edi,
+                                         eax,
+                                         &miss);
+  if (result->IsFailure()) return result;
 
   // Restore receiver.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -2603,14 +2582,24 @@
          Immediate(Handle<Map>(object->map())));
   __ j(not_equal, &miss, not_taken);
 
-  // Store the value in the cell.
+
+  // Compute the cell operand to use.
+  Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
   if (Serializer::enabled()) {
     __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-    __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
-  } else {
-    __ mov(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)), eax);
+    cell_operand = FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset);
   }
 
+  // Check that the value in the cell is not the hole. If it is, this
+  // cell could have been deleted and reintroducing the global needs
+  // to update the property details in the property dictionary of the
+  // global object. We bail out to the runtime system to do that.
+  __ cmp(cell_operand, Factory::the_hole_value());
+  __ j(equal, &miss);
+
+  // Store the value in the cell.
+  __ mov(cell_operand, eax);
+
   // Return the value (register eax).
   __ IncrementCounter(&Counters::named_store_global_inline, 1);
   __ ret(0);
@@ -2799,12 +2788,11 @@
   // -----------------------------------
   Label miss;
 
-  Failure* failure = Failure::InternalError();
-  bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi,
-                                      callback, name, &miss, &failure);
-  if (!success) {
+  MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+                                             edi, callback, name, &miss);
+  if (result->IsFailure()) {
     miss.Unuse();
-    return failure;
+    return result;
   }
 
   __ bind(&miss);
@@ -2968,12 +2956,11 @@
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  Failure* failure = Failure::InternalError();
-  bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi,
-                                      callback, name, &miss, &failure);
-  if (!success) {
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
+                                             ecx, edi, callback, name, &miss);
+  if (result->IsFailure()) {
     miss.Unuse();
-    return failure;
+    return result;
   }
 
   __ bind(&miss);
@@ -3089,7 +3076,7 @@
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss);
+  GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_string_length, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/src/ic.cc b/src/ic.cc
index 555ce3f..9a277d6 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -822,6 +822,9 @@
   }
 
   if (FLAG_use_ic) {
+    Code* non_monomorphic_stub =
+        (state == UNINITIALIZED) ? pre_monomorphic_stub() : megamorphic_stub();
+
     // Use specialized code for getting the length of strings and
     // string wrapper objects.  The length property of string wrapper
     // objects is read-only and therefore always returns the length of
@@ -829,22 +832,27 @@
     if ((object->IsString() || object->IsStringWrapper()) &&
         name->Equals(Heap::length_symbol())) {
       HandleScope scope;
+#ifdef DEBUG
+      if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+#endif
+      if (state == PREMONOMORPHIC) {
+        if (object->IsString()) {
+          Map* map = HeapObject::cast(*object)->map();
+          const int offset = String::kLengthOffset;
+          PatchInlinedLoad(address(), map, offset);
+          set_target(Builtins::builtin(Builtins::LoadIC_StringLength));
+        } else {
+          set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
+        }
+      } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
+        set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
+      } else {
+        set_target(non_monomorphic_stub);
+      }
       // Get the string if we have a string wrapper object.
       if (object->IsJSValue()) {
         object = Handle<Object>(Handle<JSValue>::cast(object)->value());
       }
-#ifdef DEBUG
-      if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
-#endif
-      Map* map = HeapObject::cast(*object)->map();
-      if (object->IsString()) {
-        const int offset = String::kLengthOffset;
-        PatchInlinedLoad(address(), map, offset);
-      }
-
-      Code* target = NULL;
-      target = Builtins::builtin(Builtins::LoadIC_StringLength);
-      set_target(target);
       return Smi::FromInt(String::cast(*object)->length());
     }
 
@@ -853,12 +861,14 @@
 #ifdef DEBUG
       if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
 #endif
-      Map* map = HeapObject::cast(*object)->map();
-      const int offset = JSArray::kLengthOffset;
-      PatchInlinedLoad(address(), map, offset);
-
-      Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength);
-      set_target(target);
+      if (state == PREMONOMORPHIC) {
+        Map* map = HeapObject::cast(*object)->map();
+        const int offset = JSArray::kLengthOffset;
+        PatchInlinedLoad(address(), map, offset);
+        set_target(Builtins::builtin(Builtins::LoadIC_ArrayLength));
+      } else {
+        set_target(non_monomorphic_stub);
+      }
       return JSArray::cast(*object)->length();
     }
 
@@ -868,8 +878,11 @@
 #ifdef DEBUG
       if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
 #endif
-      Code* target = Builtins::builtin(Builtins::LoadIC_FunctionPrototype);
-      set_target(target);
+      if (state == PREMONOMORPHIC) {
+        set_target(Builtins::builtin(Builtins::LoadIC_FunctionPrototype));
+      } else {
+        set_target(non_monomorphic_stub);
+      }
       return Accessors::FunctionGetPrototype(*object, 0);
     }
   }
@@ -1092,6 +1105,8 @@
     }
 
     if (FLAG_use_ic) {
+      // TODO(1073): don't ignore the current stub state.
+
       // Use specialized code for getting the length of strings.
       if (object->IsString() && name->Equals(Heap::length_symbol())) {
         Handle<String> string = Handle<String>::cast(object);
@@ -2098,8 +2113,6 @@
 
   Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
   if (!code.is_null()) {
-    TRBinaryOpIC ic;
-    ic.patch(*code);
     if (FLAG_trace_ic) {
       PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
              TRBinaryOpIC::GetName(previous_type),
@@ -2107,6 +2120,8 @@
              TRBinaryOpIC::GetName(result_type),
              Token::Name(op));
     }
+    TRBinaryOpIC ic;
+    ic.patch(*code);
 
     // Activate inlined smi code.
     if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
diff --git a/src/ic.h b/src/ic.h
index 55cb34a..409ad38 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -284,7 +284,8 @@
 
   // Specialized code generator routines.
   static void GenerateArrayLength(MacroAssembler* masm);
-  static void GenerateStringLength(MacroAssembler* masm);
+  static void GenerateStringLength(MacroAssembler* masm,
+                                   bool support_wrappers);
   static void GenerateFunctionPrototype(MacroAssembler* masm);
 
   // Clear the use of the inlined version.
diff --git a/src/messages.js b/src/messages.js
index 24b642f..a072d3b 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -82,8 +82,11 @@
   var result = format;
   for (var i = 0; i < args.length; i++) {
     var str;
-    try { str = ToDetailString(args[i]); }
-    catch (e) { str = "#<error>"; }
+    try {
+      str = ToDetailString(args[i]);
+    } catch (e) {
+      str = "#<error>";
+    }
     result = ArrayJoin.call(StringSplit.call(result, "%" + i), str);
   }
   return result;
@@ -124,7 +127,9 @@
     var constructor = obj.constructor;
     if (!constructor) return ToStringCheckErrorObject(obj);
     var constructorName = constructor.name;
-    if (!constructorName) return ToStringCheckErrorObject(obj);
+    if (!constructorName || !IS_STRING(constructorName)) {
+      return ToStringCheckErrorObject(obj);
+    }
     return "#<" + GetInstanceName(constructorName) + ">";
   } else {
     return ToStringCheckErrorObject(obj);
@@ -233,6 +238,12 @@
       strict_var_name:              "Variable name may not be eval or arguments in strict mode",
       strict_function_name:         "Function name may not be eval or arguments in strict mode",
       strict_octal_literal:         "Octal literals are not allowed in strict mode.",
+      strict_duplicate_property:    "Duplicate data property in object literal not allowed in strict mode",
+      accessor_data_property:       "Object literal may not have data and accessor property with the same name",
+      accessor_get_set:             "Object literal may not have multiple get/set accessors with the same name",
+      strict_lhs_assignment:        "Assignment to eval or arguments is not allowed in strict mode",
+      strict_lhs_postfix:           "Postfix increment/decrement may not have eval or arguments operand in strict mode",
+      strict_lhs_prefix:            "Prefix increment/decrement may not have eval or arguments operand in strict mode",
     };
   }
   var format = kMessages[message.type];
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 21e318b..db9e2ef 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -57,8 +57,7 @@
 
 
 PropertyDetails PropertyDetails::AsDeleted() {
-  PropertyDetails d(DONT_ENUM, NORMAL);
-  Smi* smi = Smi::FromInt(AsSmi()->value() | DeletedField::encode(1));
+  Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
   return PropertyDetails(smi);
 }
 
diff --git a/src/parser.cc b/src/parser.cc
index a1ba9ca..c097698 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -2292,6 +2292,11 @@
     expression = NewThrowReferenceError(type);
   }
 
+  if (temp_scope_->StrictMode()) {
+    // Assignment to eval or arguments is disallowed in strict mode.
+    CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
+  }
+
   Token::Value op = Next();  // Get assignment operator.
   int pos = scanner().location().beg_pos;
   Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
@@ -2518,6 +2523,12 @@
       Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
       expression = NewThrowReferenceError(type);
     }
+
+    if (temp_scope_->StrictMode()) {
+      // Prefix expression operand in strict mode may not be eval or arguments.
+      CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
+    }
+
     int position = scanner().location().beg_pos;
     IncrementOperation* increment = new IncrementOperation(op, expression);
     return new CountOperation(true /* prefix */, increment, position);
@@ -2543,6 +2554,12 @@
       Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
       expression = NewThrowReferenceError(type);
     }
+
+    if (temp_scope_->StrictMode()) {
+      // Postfix expression operand in strict mode may not be eval or arguments.
+      CheckStrictModeLValue(expression, "strict_lhs_postfix", CHECK_OK);
+    }
+
     Token::Value next = Next();
     int position = scanner().location().beg_pos;
     IncrementOperation* increment = new IncrementOperation(next, expression);
@@ -3016,6 +3033,126 @@
   return Factory::undefined_value();
 }
 
+// Defined in ast.cc
+bool IsEqualString(void* first, void* second);
+bool IsEqualSmi(void* first, void* second);
+
+
+// Validation per 11.1.5 Object Initialiser
+class ObjectLiteralPropertyChecker {
+ public:
+  ObjectLiteralPropertyChecker(Parser* parser, bool strict) :
+    props(&IsEqualString),
+    elems(&IsEqualSmi),
+    parser_(parser),
+    strict_(strict) {
+  }
+
+  void CheckProperty(
+    ObjectLiteral::Property* property,
+    Scanner::Location loc,
+    bool* ok);
+
+ private:
+  enum PropertyKind {
+    kGetAccessor = 0x01,
+    kSetAccessor = 0x02,
+    kAccessor = kGetAccessor | kSetAccessor,
+    kData = 0x04
+  };
+
+  static intptr_t GetPropertyKind(ObjectLiteral::Property* property) {
+    switch (property->kind()) {
+      case ObjectLiteral::Property::GETTER:
+        return kGetAccessor;
+      case ObjectLiteral::Property::SETTER:
+        return kSetAccessor;
+      default:
+        return kData;
+    }
+  }
+
+  HashMap props;
+  HashMap elems;
+  Parser* parser_;
+  bool strict_;
+};
+
+
+void ObjectLiteralPropertyChecker::CheckProperty(
+    ObjectLiteral::Property* property,
+    Scanner::Location loc,
+    bool* ok) {
+
+  ASSERT(property != NULL);
+
+  Literal *lit = property->key();
+  Handle<Object> handle = lit->handle();
+
+  uint32_t hash;
+  HashMap* map;
+  void* key;
+  Smi* smi_key_location;
+
+  if (handle->IsSymbol()) {
+    Handle<String> name(String::cast(*handle));
+    if (name->AsArrayIndex(&hash)) {
+      smi_key_location = Smi::FromInt(hash);
+      key = &smi_key_location;
+      map = &elems;
+    } else {
+      key = handle.location();
+      hash = name->Hash();
+      map = &props;
+    }
+  } else if (handle->ToArrayIndex(&hash)) {
+    key = handle.location();
+    map = &elems;
+  } else {
+    ASSERT(handle->IsNumber());
+    double num = handle->Number();
+    char arr[100];
+    Vector<char> buffer(arr, ARRAY_SIZE(arr));
+    const char* str = DoubleToCString(num, buffer);
+    Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+    key = name.location();
+    hash = name->Hash();
+    map = &props;
+  }
+
+  // Lookup property previously defined, if any.
+  HashMap::Entry* entry = map->Lookup(key, hash, true);
+  intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
+  intptr_t curr = GetPropertyKind(property);
+
+  // Duplicate data properties are illegal in strict mode.
+  if (strict_ && (curr & prev & kData) != 0) {
+    parser_->ReportMessageAt(loc, "strict_duplicate_property",
+                             Vector<const char*>::empty());
+    *ok = false;
+    return;
+  }
+  // Data property conflicting with an accessor.
+  if (((curr & kData) && (prev & kAccessor)) ||
+      ((prev & kData) && (curr & kAccessor))) {
+    parser_->ReportMessageAt(loc, "accessor_data_property",
+                             Vector<const char*>::empty());
+    *ok = false;
+    return;
+  }
+  // Two accessors of the same type conflicting
+  if ((curr & prev & kAccessor) != 0) {
+    parser_->ReportMessageAt(loc, "accessor_get_set",
+                             Vector<const char*>::empty());
+    *ok = false;
+    return;
+  }
+
+  // Update map
+  entry->value = reinterpret_cast<void*> (prev | curr);
+  *ok = true;
+}
+
 
 void Parser::BuildObjectLiteralConstantProperties(
     ZoneList<ObjectLiteral::Property*>* properties,
@@ -3121,12 +3258,20 @@
       new ZoneList<ObjectLiteral::Property*>(4);
   int number_of_boilerplate_properties = 0;
 
+  ObjectLiteralPropertyChecker checker(this, temp_scope_->StrictMode());
+
   Expect(Token::LBRACE, CHECK_OK);
+  Scanner::Location loc = scanner().location();
+
   while (peek() != Token::RBRACE) {
     if (fni_ != NULL) fni_->Enter();
 
     Literal* key = NULL;
     Token::Value next = peek();
+
+    // Location of the property name token
+    Scanner::Location loc = scanner().peek_location();
+
     switch (next) {
       case Token::IDENTIFIER: {
         bool is_getter = false;
@@ -3136,11 +3281,15 @@
         if (fni_ != NULL) fni_->PushLiteralName(id);
 
         if ((is_getter || is_setter) && peek() != Token::COLON) {
+            // Update loc to point to the identifier
+            loc = scanner().peek_location();
             ObjectLiteral::Property* property =
                 ParseObjectLiteralGetSet(is_getter, CHECK_OK);
             if (IsBoilerplateProperty(property)) {
               number_of_boilerplate_properties++;
             }
+            // Validate the property.
+            checker.CheckProperty(property, loc, CHECK_OK);
             properties->Add(property);
             if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
 
@@ -3197,6 +3346,8 @@
 
     // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
     if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
+    // Validate the property
+    checker.CheckProperty(property, loc, CHECK_OK);
     properties->Add(property);
 
     // TODO(1240767): Consider allowing trailing comma.
@@ -3208,6 +3359,7 @@
     }
   }
   Expect(Token::RBRACE, CHECK_OK);
+
   // Computation of literal_index must happen before pre parse bailout.
   int literal_index = temp_scope_->NextMaterializedLiteralIndex();
 
@@ -3300,10 +3452,21 @@
     //    '(' (Identifier)*[','] ')'
     Expect(Token::LPAREN, CHECK_OK);
     int start_pos = scanner().location().beg_pos;
+    Scanner::Location name_loc = Scanner::NoLocation();
+    Scanner::Location dupe_loc = Scanner::NoLocation();
 
     bool done = (peek() == Token::RPAREN);
     while (!done) {
       Handle<String> param_name = ParseIdentifier(CHECK_OK);
+
+      // Store locations for possible future error reports.
+      if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
+        name_loc = scanner().location();
+      }
+      if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
+        dupe_loc = scanner().location();
+      }
+
       Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
       top_scope_->AddParameter(parameter);
       num_parameters++;
@@ -3381,13 +3544,25 @@
     if (temp_scope_->StrictMode()) {
       if (IsEvalOrArguments(name)) {
         int position = function_token_position != RelocInfo::kNoPosition
-                         ? function_token_position
-                         : (start_pos > 0 ? start_pos - 1 : start_pos);
+            ? function_token_position
+            : (start_pos > 0 ? start_pos - 1 : start_pos);
         ReportMessageAt(Scanner::Location(position, start_pos),
                         "strict_function_name", Vector<const char*>::empty());
         *ok = false;
         return NULL;
       }
+      if (name_loc.IsValid()) {
+        ReportMessageAt(name_loc, "strict_param_name",
+                        Vector<const char*>::empty());
+        *ok = false;
+        return NULL;
+      }
+      if (dupe_loc.IsValid()) {
+        ReportMessageAt(dupe_loc, "strict_param_dupe",
+                        Vector<const char*>::empty());
+        *ok = false;
+        return NULL;
+      }
       CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
     }
 
@@ -3534,6 +3709,24 @@
   return GetSymbol(ok);
 }
 
+
+// Checks LHS expression for assignment and prefix/postfix increment/decrement
+// in strict mode.
+void Parser::CheckStrictModeLValue(Expression* expression,
+                                   const char* error,
+                                   bool* ok) {
+  ASSERT(temp_scope_->StrictMode());
+  VariableProxy* lhs = expression != NULL
+      ? expression->AsVariableProxy()
+      : NULL;
+
+  if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
+    ReportMessage(error, Vector<const char*>::empty());
+    *ok = false;
+  }
+}
+
+
 // Checks whether octal literal last seen is between beg_pos and end_pos.
 // If so, reports an error.
 void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
diff --git a/src/parser.h b/src/parser.h
index 916f9ed..68983b4 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -613,6 +613,11 @@
                                            bool* is_set,
                                            bool* ok);
 
+  // Strict mode validation of LValue expressions
+  void CheckStrictModeLValue(Expression* expression,
+                             const char* error,
+                             bool* ok);
+
   // Strict mode octal literal validation.
   void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
 
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index e79dcff..153bf43 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -117,24 +117,9 @@
 }
 
 
-Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
-                                                 int deoptimization_index) {
-  ASSERT(deoptimization_index != -1);
-  DeoptimizationInfo pc_and_deoptimization_index;
-  pc_and_deoptimization_index.pc = assembler->pc_offset();
-  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
-  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
-  pc_and_deoptimization_index.arguments = 0;
-  pc_and_deoptimization_index.has_doubles = false;
-  deoptimization_info_.Add(pc_and_deoptimization_index);
-  indexes_.Add(new ZoneList<int>(8));
-  registers_.Add(NULL);
-  return Safepoint(indexes_.last(), registers_.last());
-}
-
-
-Safepoint SafepointTableBuilder::DefineSafepointWithRegisters(
-    Assembler* assembler, int arguments, int deoptimization_index) {
+Safepoint SafepointTableBuilder::DefineSafepoint(
+    Assembler* assembler, Safepoint::Kind kind, int arguments,
+    int deoptimization_index) {
   ASSERT(deoptimization_index != -1);
   ASSERT(arguments >= 0);
   DeoptimizationInfo pc_and_deoptimization_index;
@@ -142,30 +127,16 @@
   pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
   pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
   pc_and_deoptimization_index.arguments = arguments;
-  pc_and_deoptimization_index.has_doubles = false;
+  pc_and_deoptimization_index.has_doubles = (kind & Safepoint::kWithDoubles);
   deoptimization_info_.Add(pc_and_deoptimization_index);
   indexes_.Add(new ZoneList<int>(8));
-  registers_.Add(new ZoneList<int>(4));
+  registers_.Add((kind & Safepoint::kWithRegisters)
+      ? new ZoneList<int>(4)
+      : NULL);
   return Safepoint(indexes_.last(), registers_.last());
 }
 
 
-Safepoint SafepointTableBuilder::DefineSafepointWithRegistersAndDoubles(
-    Assembler* assembler, int arguments, int deoptimization_index) {
-  ASSERT(deoptimization_index != -1);
-  ASSERT(arguments >= 0);
-  DeoptimizationInfo pc_and_deoptimization_index;
-  pc_and_deoptimization_index.pc = assembler->pc_offset();
-  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
-  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
-  pc_and_deoptimization_index.arguments = arguments;
-  pc_and_deoptimization_index.has_doubles = true;
-  deoptimization_info_.Add(pc_and_deoptimization_index);
-  indexes_.Add(new ZoneList<int>(8));
-  registers_.Add(new ZoneList<int>(4));
-  return Safepoint(indexes_.last(), registers_.last());
-}
-
 unsigned SafepointTableBuilder::GetCodeOffset() const {
   ASSERT(emitted_);
   return offset_;
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index d703051..fa35905 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -180,6 +180,13 @@
 
 class Safepoint BASE_EMBEDDED {
  public:
+  typedef enum {
+    kSimple = 0,
+    kWithRegisters = 1 << 0,
+    kWithDoubles = 1 << 1,
+    kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
+  } Kind;
+
   static const int kNoDeoptimizationIndex =
       (1 << (SafepointEntry::kDeoptIndexBits)) - 1;
 
@@ -210,23 +217,7 @@
   // Define a new safepoint for the current position in the body.
   Safepoint DefineSafepoint(
       Assembler* assembler,
-      int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
-
-  // Define a new safepoint with registers on the stack for the
-  // current position in the body and take the number of arguments on
-  // top of the registers into account.
-  Safepoint DefineSafepointWithRegisters(
-      Assembler* assembler,
-      int arguments,
-      int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
-
-  // Define a new safepoint with all double registers and the normal
-  // registers on the stack for the current position in the body and
-  // take the number of arguments on top of the registers into account.
-  // TODO(1043) Rewrite the three SafepointTableBuilder::DefineSafepoint
-  // methods to one method that uses template arguments.
-  Safepoint DefineSafepointWithRegistersAndDoubles(
-      Assembler* assembler,
+      Safepoint::Kind kind,
       int arguments,
       int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
 
diff --git a/src/scanner-base.h b/src/scanner-base.h
index a3e07d3..3d9800f 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -274,10 +274,19 @@
   struct Location {
     Location(int b, int e) : beg_pos(b), end_pos(e) { }
     Location() : beg_pos(0), end_pos(0) { }
+
+    bool IsValid() const {
+      return beg_pos >= 0 && end_pos >= beg_pos;
+    }
+
     int beg_pos;
     int end_pos;
   };
 
+  static Location NoLocation() {
+    return Location(-1, -1);
+  }
+
   // Returns the location information for the current token
   // (the token returned by Next()).
   Location location() const { return current_.location; }
diff --git a/src/scopes.h b/src/scopes.h
index 09901ad..a9220eb 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -289,6 +289,17 @@
   int ContextChainLength(Scope* scope);
 
   // ---------------------------------------------------------------------------
+  // Strict mode support.
+  bool IsDeclared(Handle<String> name) {
+    // During formal parameter list parsing the scope only contains
+    // two variables inserted at initialization: "this" and "arguments".
+    // "this" is an invalid parameter name and "arguments" is invalid parameter
+    // name in strict mode. Therefore looking up with the map which includes
+    // "this" and "arguments" in addition to all formal parameters is safe.
+    return variables_.Lookup(name) != NULL;
+  }
+
+  // ---------------------------------------------------------------------------
   // Debugging.
 
 #ifdef DEBUG
diff --git a/src/serialize.cc b/src/serialize.cc
index 6a6c6bb..60c2cbb 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -335,7 +335,7 @@
 
   Add(ExternalReference::delete_handle_scope_extensions().address(),
       RUNTIME_ENTRY,
-      3,
+      4,
       "HandleScope::DeleteExtensions");
 
   // Miscellaneous
@@ -504,7 +504,7 @@
       "power_double_int_function");
   Add(ExternalReference::arguments_marker_location().address(),
       UNCLASSIFIED,
-      40,
+      41,
       "Factory::arguments_marker().location()");
 }
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 1f534d9..07f894a 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -427,7 +427,8 @@
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
-                                       Label* miss_label);
+                                       Label* miss_label,
+                                       bool support_wrappers);
 
   static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                             Register receiver,
@@ -501,17 +502,16 @@
                          String* name,
                          Label* miss);
 
-  bool GenerateLoadCallback(JSObject* object,
-                            JSObject* holder,
-                            Register receiver,
-                            Register name_reg,
-                            Register scratch1,
-                            Register scratch2,
-                            Register scratch3,
-                            AccessorInfo* callback,
-                            String* name,
-                            Label* miss,
-                            Failure** failure);
+  MaybeObject* GenerateLoadCallback(JSObject* object,
+                                    JSObject* holder,
+                                    Register receiver,
+                                    Register name_reg,
+                                    Register scratch1,
+                                    Register scratch2,
+                                    Register scratch3,
+                                    AccessorInfo* callback,
+                                    String* name,
+                                    Label* miss);
 
   void GenerateLoadConstant(JSObject* object,
                             JSObject* holder,
diff --git a/src/top.cc b/src/top.cc
index 98c673c..e32eb6b 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -72,7 +72,7 @@
   handler_ = 0;
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
-  simulator_ = assembler::arm::Simulator::current();
+  simulator_ = Simulator::current();
 #elif V8_TARGET_ARCH_MIPS
   simulator_ = assembler::mips::Simulator::current();
 #endif
@@ -806,7 +806,7 @@
 }
 
 
-bool Top::ShouldReturnException(bool* is_caught_externally,
+bool Top::ShouldReportException(bool* is_caught_externally,
                                 bool catchable_by_javascript) {
   // Find the top-most try-catch handler.
   StackHandler* handler =
@@ -847,15 +847,15 @@
   Handle<Object> exception_handle(exception_object);
 
   // Determine reporting and whether the exception is caught externally.
-  bool is_caught_externally = false;
   bool is_out_of_memory = exception == Failure::OutOfMemoryException();
   bool is_termination_exception = exception == Heap::termination_exception();
   bool catchable_by_javascript = !is_termination_exception && !is_out_of_memory;
   // Only real objects can be caught by JS.
   ASSERT(!catchable_by_javascript || is_object);
-  bool should_return_exception =
-      ShouldReturnException(&is_caught_externally, catchable_by_javascript);
-  bool report_exception = catchable_by_javascript && should_return_exception;
+  bool is_caught_externally = false;
+  bool should_report_exception =
+      ShouldReportException(&is_caught_externally, catchable_by_javascript);
+  bool report_exception = catchable_by_javascript && should_report_exception;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger of exception.
@@ -1095,7 +1095,7 @@
   // thread_local_ is restored on a separate OS thread.
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
-  thread_local_.simulator_ = assembler::arm::Simulator::current();
+  thread_local_.simulator_ = Simulator::current();
 #elif V8_TARGET_ARCH_MIPS
   thread_local_.simulator_ = assembler::mips::Simulator::current();
 #endif
diff --git a/src/top.h b/src/top.h
index e485de1..5b0fd61 100644
--- a/src/top.h
+++ b/src/top.h
@@ -109,7 +109,7 @@
 
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
-  assembler::arm::Simulator* simulator_;
+  Simulator* simulator_;
 #elif V8_TARGET_ARCH_MIPS
   assembler::mips::Simulator* simulator_;
 #endif
@@ -386,7 +386,9 @@
   static void DoThrow(MaybeObject* exception,
                       MessageLocation* location,
                       const char* message);
-  static bool ShouldReturnException(bool* is_caught_externally,
+  // Checks if exception should be reported and finds out if it's
+  // caught externally.
+  static bool ShouldReportException(bool* is_caught_externally,
                                     bool catchable_by_javascript);
 
   // Attempts to compute the current source location, storing the
diff --git a/src/type-info.cc b/src/type-info.cc
index f4f65e9..0bb7262 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -171,7 +171,7 @@
 }
 
 
-TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr, Side side) {
+TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
   Handle<Object> object = GetElement(map_, expr->position());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
@@ -198,7 +198,7 @@
 }
 
 
-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr, Side side) {
+TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
   Handle<Object> object = GetElement(map_, expr->position());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
diff --git a/src/type-info.h b/src/type-info.h
index e026e88..c7029c8 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -236,12 +236,6 @@
 
 class TypeFeedbackOracle BASE_EMBEDDED {
  public:
-  enum Side {
-    LEFT,
-    RIGHT,
-    RESULT
-  };
-
   TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
 
   bool LoadIsMonomorphic(Property* expr);
@@ -261,8 +255,8 @@
   bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
   // Get type information for arithmetic operations and compares.
-  TypeInfo BinaryType(BinaryOperation* expr, Side side);
-  TypeInfo CompareType(CompareOperation* expr, Side side);
+  TypeInfo BinaryType(BinaryOperation* expr);
+  TypeInfo CompareType(CompareOperation* expr);
   TypeInfo SwitchType(CaseClause* clause);
 
  private:
diff --git a/src/v8.cc b/src/v8.cc
index f5b6150..c5a5775 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -79,7 +79,7 @@
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
 #if defined(V8_TARGET_ARCH_ARM)
-  ::assembler::arm::Simulator::Initialize();
+  Simulator::Initialize();
 #elif defined(V8_TARGET_ARCH_MIPS)
   ::assembler::mips::Simulator::Initialize();
 #endif
diff --git a/src/variables.cc b/src/variables.cc
index 7f580fc..fa7ce1b 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -112,12 +112,12 @@
   : scope_(scope),
     name_(name),
     mode_(mode),
-    is_valid_LHS_(is_valid_LHS),
     kind_(kind),
     local_if_not_shadowed_(NULL),
+    rewrite_(NULL),
+    is_valid_LHS_(is_valid_LHS),
     is_accessed_from_inner_scope_(false),
-    is_used_(false),
-    rewrite_(NULL) {
+    is_used_(false) {
   // names must be canonicalized for fast equality checks
   ASSERT(name->IsSymbol());
 }
diff --git a/src/variables.h b/src/variables.h
index 882a52e..5d27a02 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -187,21 +187,23 @@
   Scope* scope_;
   Handle<String> name_;
   Mode mode_;
-  bool is_valid_LHS_;
   Kind kind_;
 
   Variable* local_if_not_shadowed_;
 
-  // Usage info.
-  bool is_accessed_from_inner_scope_;  // set by variable resolver
-  bool is_used_;
-
   // Static type information
   StaticType type_;
 
   // Code generation.
   // rewrite_ is usually a Slot or a Property, but may be any expression.
   Expression* rewrite_;
+
+  // Valid as a LHS? (const and this are not valid LHS, for example)
+  bool is_valid_LHS_;
+
+  // Usage info.
+  bool is_accessed_from_inner_scope_;  // set by variable resolver
+  bool is_used_;
 };
 
 
diff --git a/src/version.cc b/src/version.cc
index ca7f528..6c5ffd9 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     0
-#define BUILD_NUMBER      11
-#define PATCH_LEVEL       3
+#define BUILD_NUMBER      12
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 6a5ec61..999306e 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1306,18 +1306,6 @@
 }
 
 
-void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
-  EnsureSpace ensure_space(this);
-  RecordRelocInfo(rmode);
-  last_pc_ = pc_;
-  ASSERT((0 <= cc) && (cc < 16));
-  // 0000 1111 1000 tttn #32-bit disp.
-  emit(0x0F);
-  emit(0x80 | cc);
-  emit(entry - (pc_ + sizeof(intptr_t)));
-}
-
-
 void Assembler::jmp(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 9d4694b..29817a3 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -1134,7 +1134,6 @@
 
   // Conditional jumps
   void j(Condition cc, Label* L);
-  void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
   void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
 
   // Conditional short jump
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index d5ba567..60d46ef 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -82,6 +82,7 @@
   }
 #ifdef DEBUG
   // Destroy the code which is not supposed to run again.
+  CHECK(code->safepoint_table_start() >= last_pc_offset);
   unsigned instructions = code->safepoint_table_start() - last_pc_offset;
   CodePatcher destroyer(code->instruction_start() + last_pc_offset,
                         instructions);
@@ -145,7 +146,7 @@
   // The 'fixed' part of the frame consists of the incoming parameters and
   // the part described by JavaScriptFrameConstants.
   unsigned fixed_frame_size = ComputeFixedSize(function);
-  unsigned input_frame_size = input_->GetFrameSize();
+  unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index e31a341..d060e31 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -397,7 +397,7 @@
 }
 
 
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -405,7 +405,8 @@
   // -----------------------------------
   Label miss;
 
-  StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss,
+                                         support_wrappers);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 14c6acb..513c984 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -188,6 +188,10 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
+  // Ensure that patching a deoptimization point won't overwrite the table.
+  for (int i = 0; i < Assembler::kCallInstructionLength; i++) {
+    masm()->int3();
+  }
   safepoints_.Emit(masm(), StackSlotCount());
   return !is_aborted();
 }
@@ -429,7 +433,10 @@
   if (cc == no_condition) {
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+    NearLabel done;
+    __ j(NegateCondition(cc), &done);
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    __ bind(&done);
   }
 }
 
@@ -492,37 +499,40 @@
 }
 
 
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               int deoptimization_index) {
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    int deoptimization_index) {
   const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-                                                    deoptimization_index);
+      kind, arguments, deoptimization_index);
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
       safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
     }
   }
+  if (kind & Safepoint::kWithRegisters) {
+    // Register rsi always contains a pointer to the context.
+    safepoint.DefinePointerRegister(rsi);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
 }
 
 
 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                             int arguments,
                                             int deoptimization_index) {
-  const ZoneList<LOperand*>* operands = pointers->operands();
-  Safepoint safepoint =
-      safepoints_.DefineSafepointWithRegisters(
-          masm(), arguments, deoptimization_index);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
-    } else if (pointer->IsRegister()) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
-    }
-  }
-  // Register rsi always contains a pointer to the context.
-  safepoint.DefinePointerRegister(rsi);
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+      deoptimization_index);
 }
 
 
@@ -1385,12 +1395,32 @@
 
 
 void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
-  Abort("Unimplemented: %s", "DoLoadGlobal");
+  Register result = ToRegister(instr->result());
+  if (result.is(rax)) {
+    __ load_rax(instr->hydrogen()->cell().location(),
+                RelocInfo::GLOBAL_PROPERTY_CELL);
+  } else {
+    __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
+    __ movq(result, Operand(result, 0));
+  }
+  if (instr->hydrogen()->check_hole_value()) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(equal, instr->environment());
+  }
 }
 
 
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
-  Abort("Unimplemented: %s", "DoStoreGlobal");
+  Register value = ToRegister(instr->InputAt(0));
+  if (value.is(rax)) {
+    __ store_rax(instr->hydrogen()->cell().location(),
+                 RelocInfo::GLOBAL_PROPERTY_CELL);
+  } else {
+    __ movq(kScratchRegister,
+            Handle<Object>::cast(instr->hydrogen()->cell()),
+            RelocInfo::GLOBAL_PROPERTY_CELL);
+    __ movq(Operand(kScratchRegister, 0), value);
+  }
 }
 
 
@@ -1400,7 +1430,14 @@
 
 
 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
-  Abort("Unimplemented: %s", "DoLoadNamedField");
+  Register object = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  if (instr->hydrogen()->is_in_object()) {
+    __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
+  } else {
+    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
+  }
 }
 
 
@@ -1450,7 +1487,26 @@
 
 
 void LCodeGen::DoPushArgument(LPushArgument* instr) {
-  Abort("Unimplemented: %s", "DoPushArgument");
+  LOperand* argument = instr->InputAt(0);
+  if (argument->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(argument);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      __ push(Immediate(static_cast<int32_t>(literal->Number())));
+    } else if (r.IsDouble()) {
+      Abort("unsupported double immediate");
+    } else {
+      ASSERT(r.IsTagged());
+      __ Push(literal);
+    }
+  } else if (argument->IsRegister()) {
+    __ push(ToRegister(argument));
+  } else {
+    ASSERT(!argument->IsDoubleRegister());
+    __ push(ToOperand(argument));
+  }
 }
 
 
@@ -1461,7 +1517,9 @@
 
 
 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
-  Abort("Unimplemented: %s", "DoGlobalReceiver");
+  Register result = ToRegister(instr->result());
+  __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ movq(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
 }
 
 
@@ -1558,7 +1616,12 @@
 
 
 void LCodeGen::DoCallNew(LCallNew* instr) {
-  Abort("Unimplemented: %s", "DoCallNew");
+  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+  ASSERT(ToRegister(instr->result()).is(rax));
+
+  Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ Set(rax, instr->arity());
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
 }
 
 
@@ -1568,7 +1631,32 @@
 
 
 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
-  Abort("Unimplemented: %s", "DoStoreNamedField");
+  Register object = ToRegister(instr->object());
+  Register value = ToRegister(instr->value());
+  int offset = instr->offset();
+
+  if (!instr->transition().is_null()) {
+    __ Move(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+  }
+
+  // Do the store.
+  if (instr->is_in_object()) {
+    __ movq(FieldOperand(object, offset), value);
+    if (instr->needs_write_barrier()) {
+      Register temp = ToRegister(instr->TempAt(0));
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWrite(object, offset, value, temp);
+    }
+  } else {
+    Register temp = ToRegister(instr->TempAt(0));
+    __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ movq(FieldOperand(temp, offset), value);
+    if (instr->needs_write_barrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWrite(temp, offset, value, object);
+    }
+  }
 }
 
 
@@ -1718,7 +1806,13 @@
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
-  Abort("Unimplemented: %s", "DoCheckSmi");
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  Condition cc = masm()->CheckSmi(ToRegister(input));
+  if (instr->condition() != equal) {
+    cc = NegateCondition(cc);
+  }
+  DeoptimizeIf(cc, instr->environment());
 }
 
 
@@ -1728,12 +1822,20 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  Abort("Unimplemented: %s", "DoCheckFunction");
+  ASSERT(instr->InputAt(0)->IsRegister());
+  Register reg = ToRegister(instr->InputAt(0));
+  __ Cmp(reg, instr->hydrogen()->target());
+  DeoptimizeIf(not_equal, instr->environment());
 }
 
 
 void LCodeGen::DoCheckMap(LCheckMap* instr) {
-  Abort("Unimplemented: %s", "DoCheckMap");
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         instr->hydrogen()->map());
+  DeoptimizeIf(not_equal, instr->environment());
 }
 
 
@@ -1743,7 +1845,29 @@
 
 
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
-  Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
+  Register reg = ToRegister(instr->TempAt(0));
+
+  Handle<JSObject> holder = instr->holder();
+  Handle<JSObject> current_prototype = instr->prototype();
+
+  // Load prototype object.
+  LoadHeapObject(reg, current_prototype);
+
+  // Check prototype maps up to the holder.
+  while (!current_prototype.is_identical_to(holder)) {
+    __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Handle<Map>(current_prototype->map()));
+    DeoptimizeIf(not_equal, instr->environment());
+    current_prototype =
+        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+    // Load next prototype object.
+    LoadHeapObject(reg, current_prototype);
+  }
+
+  // Check the holder map.
+  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         Handle<Map>(current_prototype->map()));
+  DeoptimizeIf(not_equal, instr->environment());
 }
 
 
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 7da4047..cbcc5c8 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -192,6 +192,10 @@
   void DoMathSin(LUnaryMathOperation* instr);
 
   // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       int deoptimization_index);
   void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index bdda1fa..2dd5cf7 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1043,8 +1043,9 @@
 
 
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
-  Abort("Unimplemented: %s", "DoPushArgument");
-  return NULL;
+  ++argument_count_;
+  LOperand* argument = UseOrConstant(instr->argument());
+  return new LPushArgument(argument);
 }
 
 
@@ -1054,8 +1055,7 @@
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
-  Abort("Unimplemented: %s", "DoGlobalReceiver");
-  return NULL;
+  return DefineAsRegister(new LGlobalReceiver);
 }
 
 
@@ -1097,8 +1097,10 @@
 
 
 LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
-  Abort("Unimplemented: %s", "DoCallNew");
-  return NULL;
+  LOperand* constructor = UseFixed(instr->constructor(), rdi);
+  argument_count_ -= instr->argument_count();
+  LCallNew* result = new LCallNew(constructor);
+  return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
@@ -1394,8 +1396,8 @@
 
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
-  Abort("Unimplemented: %s", "DoCheckNonSmi");
-  return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, zero));
 }
 
 
@@ -1406,26 +1408,28 @@
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
-  Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
-  return NULL;
+  LOperand* temp = TempRegister();
+  LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+  return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
-  Abort("Unimplemented: %s", "DoCheckSmi");
-  return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, not_zero));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
-  Abort("Unimplemented: %s", "DoCheckFunction");
-  return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
-  Abort("Unimplemented: %s", "DoCheckMap");
-  return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LCheckMap* result = new LCheckMap(value);
+  return AssignEnvironment(result);
 }
 
 
@@ -1453,15 +1457,15 @@
 
 
 LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  Abort("Unimplemented: %s", "DoLoadGlobal");
-  return NULL;
+  LLoadGlobal* result = new LLoadGlobal;
+  return instr->check_hole_value()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  Abort("Unimplemented: %s", "DoStoreGlobal");
-  return NULL;
-}
+  return new LStoreGlobal(UseRegisterAtStart(instr->value()));}
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
@@ -1471,8 +1475,9 @@
 
 
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
-  Abort("Unimplemented: %s", "DoLoadNamedField");
-  return NULL;
+  ASSERT(instr->representation().IsTagged());
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new LLoadNamedField(obj));
 }
 
 
@@ -1522,8 +1527,22 @@
 
 
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
-  Abort("Unimplemented: %s", "DoStoreNamedField");
-  return NULL;
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  // We only need a scratch register if we have a write barrier or we
+  // have a store into the properties array (not in-object-property).
+  LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
+      ? TempRegister() : NULL;
+
+  return new LStoreNamedField(obj, val, temp);
 }
 
 
@@ -1660,7 +1679,14 @@
 
 
 LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
-  Abort("Unimplemented: %s", "DoEnterInlined");
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->function(),
+                                               false,
+                                               undefined);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
   return NULL;
 }
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 2fc825f..e104e5b 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1770,10 +1770,18 @@
   Move(rdi, Handle<JSFunction>(function));
   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  // Invoke the cached code.
-  Handle<Code> code(function->code());
-  ParameterCount expected(function->shared()->formal_parameter_count());
-  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  if (V8::UseCrankshaft()) {
+    // Since Crankshaft can recompile a function, we need to load
+    // the Code object every time we call the function.
+    movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+    ParameterCount expected(function->shared()->formal_parameter_count());
+    InvokeCode(rdx, expected, actual, flag);
+  } else {
+    // Invoke the cached code.
+    Handle<Code> code(function->code());
+    ParameterCount expected(function->shared()->formal_parameter_count());
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  }
 }
 
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index c86f43d..8888d70 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -307,28 +307,32 @@
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss) {
+                                            Label* miss,
+                                            bool support_wrappers) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch register.
-  GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, miss,
+                      support_wrappers ? &check_wrapper : miss);
 
   // Load length directly from the string.
   __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
   __ ret(0);
 
-  // Check if the object is a JSValue wrapper.
-  __ bind(&check_wrapper);
-  __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
-  __ j(not_equal, miss);
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
+    __ j(not_equal, miss);
 
-  // Check if the wrapped value is a string and load the length
-  // directly if it is.
-  __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
-  __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
-  __ ret(0);
+    // Check if the wrapped value is a string and load the length
+    // directly if it is.
+    __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+    __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+    __ ret(0);
+  }
 }
 
 
@@ -437,10 +441,9 @@
 
 
 // Generates call to API function.
-static bool GenerateFastApiCall(MacroAssembler* masm,
-                                const CallOptimization& optimization,
-                                int argc,
-                                Failure** failure) {
+static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
+                                        const CallOptimization& optimization,
+                                        int argc) {
   // ----------- S t a t e -------------
   //  -- rsp[0]              : return address
   //  -- rsp[8]              : object passing the type check
@@ -504,13 +507,8 @@
   // already generated).  Do not allow the assembler to perform a
   // garbage collection but instead return the allocation failure
   // object.
-  MaybeObject* result =
-      masm->TryCallApiFunctionAndReturn(&fun, argc + kFastApiCallArguments + 1);
-  if (result->IsFailure()) {
-    *failure = Failure::cast(result);
-    return false;
-  }
-  return true;
+  return masm->TryCallApiFunctionAndReturn(&fun,
+                                           argc + kFastApiCallArguments + 1);
 }
 
 
@@ -523,17 +521,16 @@
         arguments_(arguments),
         name_(name) {}
 
-  bool Compile(MacroAssembler* masm,
-               JSObject* object,
-               JSObject* holder,
-               String* name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss,
-               Failure** failure) {
+  MaybeObject* Compile(MacroAssembler* masm,
+                       JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
+                       Register receiver,
+                       Register scratch1,
+                       Register scratch2,
+                       Register scratch3,
+                       Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -553,8 +550,7 @@
                               lookup,
                               name,
                               optimization,
-                              miss,
-                              failure);
+                              miss);
     } else {
       CompileRegular(masm,
                      object,
@@ -565,23 +561,22 @@
                      name,
                      holder,
                      miss);
-      return true;
+      return Heap::undefined_value();  // Success.
     }
   }
 
  private:
-  bool CompileCacheable(MacroAssembler* masm,
-                        JSObject* object,
-                        Register receiver,
-                        Register scratch1,
-                        Register scratch2,
-                        Register scratch3,
-                        JSObject* interceptor_holder,
-                        LookupResult* lookup,
-                        String* name,
-                        const CallOptimization& optimization,
-                        Label* miss_label,
-                        Failure** failure) {
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -643,13 +638,10 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      bool success = GenerateFastApiCall(masm,
-                                         optimization,
-                                         arguments_.immediate(),
-                                         failure);
-      if (!success) {
-        return false;
-      }
+      MaybeObject* result = GenerateFastApiCall(masm,
+                                                optimization,
+                                                arguments_.immediate());
+      if (result->IsFailure()) return result;
     } else {
       __ InvokeFunction(optimization.constant_function(), arguments_,
                         JUMP_FUNCTION);
@@ -668,7 +660,7 @@
       FreeSpaceForFastApiCall(masm, scratch1);
     }
 
-    return true;
+    return Heap::undefined_value();  // Success.
   }
 
   void CompileRegular(MacroAssembler* masm,
@@ -1021,17 +1013,16 @@
 }
 
 
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
-                                        JSObject* holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        AccessorInfo* callback,
-                                        String* name,
-                                        Label* miss,
-                                        Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                JSObject* holder,
+                                                Register receiver,
+                                                Register name_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                AccessorInfo* callback,
+                                                String* name,
+                                                Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
@@ -1095,12 +1086,7 @@
   // already generated).  Do not allow the assembler to perform a
   // garbage collection but instead return the allocation failure
   // object.
-  MaybeObject* result = masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
-  if (result->IsFailure()) {
-    *failure = Failure::cast(result);
-    return false;
-  }
-  return true;
+  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
 }
 
 
@@ -2135,17 +2121,14 @@
   }
 
   if (depth != kInvalidProtoDepth) {
-    Failure* failure;
     // Move the return address on top of the stack.
     __ movq(rax, Operand(rsp, 3 * kPointerSize));
     __ movq(Operand(rsp, 0 * kPointerSize), rax);
 
     // rsp[2 * kPointerSize] is uninitialized, rsp[3 * kPointerSize] contains
     // duplicate of return address and will be overwritten.
-    bool success = GenerateFastApiCall(masm(), optimization, argc, &failure);
-    if (!success) {
-      return failure;
-    }
+    MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+    if (result->IsFailure()) return result;
   } else {
     __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
   }
@@ -2194,21 +2177,17 @@
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
   CallInterceptorCompiler compiler(this, arguments(), rcx);
-  Failure* failure;
-  bool success = compiler.Compile(masm(),
-                                  object,
-                                  holder,
-                                  name,
-                                  &lookup,
-                                  rdx,
-                                  rbx,
-                                  rdi,
-                                  rax,
-                                  &miss,
-                                  &failure);
-  if (!success) {
-    return failure;
-  }
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         rdx,
+                                         rbx,
+                                         rdi,
+                                         rax,
+                                         &miss);
+  if (result->IsFailure()) return result;
 
   // Restore receiver.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2459,9 +2438,17 @@
          Handle<Map>(object->map()));
   __ j(not_equal, &miss);
 
+  // Check that the value in the cell is not the hole. If it is, this
+  // cell could have been deleted and reintroducing the global needs
+  // to update the property details in the property dictionary of the
+  // global object. We bail out to the runtime system to do that.
+  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+  __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+                 Heap::kTheHoleValueRootIndex);
+  __ j(equal, &miss);
+
   // Store the value in the cell.
-  __ Move(rcx, Handle<JSGlobalPropertyCell>(cell));
-  __ movq(FieldOperand(rcx, JSGlobalPropertyCell::kValueOffset), rax);
+  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
 
   // Return the value (register rax).
   __ IncrementCounter(&Counters::named_store_global_inline, 1);
@@ -2648,12 +2635,11 @@
   // -----------------------------------
   Label miss;
 
-  Failure* failure = Failure::InternalError();
-  bool success = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi,
-                                      callback, name, &miss, &failure);
-  if (!success) {
+  MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
+                                             rdi, callback, name, &miss);
+  if (result->IsFailure()) {
     miss.Unuse();
-    return failure;
+    return result;
   }
 
   __ bind(&miss);
@@ -2812,12 +2798,11 @@
   __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
-  Failure* failure = Failure::InternalError();
-  bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi,
-                                      callback, name, &miss, &failure);
-  if (!success) {
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
+                                             rcx, rdi, callback, name, &miss);
+  if (result->IsFailure()) {
     miss.Unuse();
-    return failure;
+    return result;
   }
 
   __ bind(&miss);
@@ -2933,7 +2918,7 @@
   __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss);
+  GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_string_length, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index b056403..1009f85 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -51,6 +51,8 @@
 test-serialize/TestThatAlwaysFails: FAIL
 test-serialize/DependentTestThatAlwaysFails: FAIL
 
+# BUG(1079)
+test-api/CaptureStackTraceForUncaughtException: PASS || FAIL
 
 ##############################################################################
 [ $arch == x64 ]
@@ -101,6 +103,11 @@
 test-debug/DebuggerAgentProtocolOverflowHeader: SKIP
 test-sockets/Socket: SKIP
 
+# BUG(1075): Some deserialization tests fail on ARM
+test-serialize/Deserialize: SKIP
+test-serialize/DeserializeFromSecondSerializationAndRunScript2: SKIP
+test-serialize/DeserializeAndRunScript2: SKIP
+test-serialize/DeserializeFromSecondSerialization: SKIP
 
 ##############################################################################
 [ $arch == arm && $crankshaft ]
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 90f4996..48dc72e 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -2389,6 +2389,11 @@
   CompileRun("asdf;");
   v8::Handle<Value> string = CompileRun("try { asdf; } catch(e) { e + ''; }");
   CHECK(string->Equals(v8_str("Whoops")));
+  CompileRun("ReferenceError.prototype.constructor = new Object();"
+             "ReferenceError.prototype.constructor.name = 1;"
+             "Number.prototype.toString = function() { return 'Whoops'; };"
+             "ReferenceError.prototype.toString = Object.prototype.toString;");
+  CompileRun("asdf;");
   v8::V8::RemoveMessageListeners(check_message);
 }
 
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 0f12f98..af1a4e8 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -45,11 +45,7 @@
 static v8::Persistent<v8::Context> env;
 
 
-// The test framework does not accept flags on the command line, so we set them
 static void InitializeVM() {
-  // enable generation of comments
-  FLAG_debug_code = true;
-
   if (env.IsEmpty()) {
     env = v8::Context::New();
   }
diff --git a/test/cctest/test-assembler-mips.cc b/test/cctest/test-assembler-mips.cc
index 955562b..ecb42e2 100644
--- a/test/cctest/test-assembler-mips.cc
+++ b/test/cctest/test-assembler-mips.cc
@@ -47,14 +47,10 @@
 static v8::Persistent<v8::Context> env;
 
 
-// The test framework does not accept flags on the command line, so we set them.
 static void InitializeVM() {
   // Disable compilation of natives.
   FLAG_disable_native_files = true;
 
-  // Enable generation of comments.
-  FLAG_debug_code = true;
-
   if (env.IsEmpty()) {
     env = v8::Context::New();
   }
diff --git a/test/mjsunit/delete-global-properties.js b/test/mjsunit/delete-global-properties.js
index b3813dc..2acf591 100644
--- a/test/mjsunit/delete-global-properties.js
+++ b/test/mjsunit/delete-global-properties.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,6 +32,17 @@
 assertTrue("tmp" in this);
 function f() { return 1; }
 assertFalse(delete f);  // should be DONT_DELETE
-assertEquals(1, f());  
+assertEquals(1, f());
 
-/* Perhaps related to bugs/11? */
+// Check that deleting and reintroducing global variables works.
+// Get into the IC case for storing to a deletable global property.
+function introduce_x() { x = 42; }
+for (var i = 0; i < 10; i++) introduce_x();
+// Check that the property has been introduced.
+assertTrue(this.hasOwnProperty('x'));
+// Check that deletion works.
+delete x;
+assertFalse(this.hasOwnProperty('x'));
+// Check that reintroduction works.
+introduce_x();
+assertTrue(this.hasOwnProperty('x'));
diff --git a/test/mjsunit/strict-mode.js b/test/mjsunit/strict-mode.js
index 83a22cc..6f3a244 100644
--- a/test/mjsunit/strict-mode.js
+++ b/test/mjsunit/strict-mode.js
@@ -44,6 +44,23 @@
     }", exception);
 }
 
+function CheckFunctionConstructorStrictMode() {
+  var args = [];
+  for (var i = 0; i < arguments.length; i ++) {
+    args[i] = arguments[i];
+  }
+  // Create non-strict function. No exception.
+  args[arguments.length] = "";
+  assertDoesNotThrow(function() {
+    Function.apply(this, args);
+  });
+  // Create strict mode function. Exception expected.
+  args[arguments.length] = "'use strict';";
+  assertThrows(function() {
+    Function.apply(this, args);
+  }, SyntaxError);
+}
+
 // Incorrect 'use strict' directive.
 function UseStrictEscape() {
   "use\\x20strict";
@@ -76,19 +93,29 @@
 CheckStrictMode("function arguments() {}", SyntaxError)
 
 // Function parameter named 'eval'.
-//CheckStrictMode("function foo(a, b, eval, c, d) {}", SyntaxError)
+CheckStrictMode("function foo(a, b, eval, c, d) {}", SyntaxError)
 
 // Function parameter named 'arguments'.
-//CheckStrictMode("function foo(a, b, arguments, c, d) {}", SyntaxError)
+CheckStrictMode("function foo(a, b, arguments, c, d) {}", SyntaxError)
 
 // Property accessor parameter named 'eval'.
-//CheckStrictMode("var o = { set foo(eval) {} }", SyntaxError)
+CheckStrictMode("var o = { set foo(eval) {} }", SyntaxError)
 
 // Property accessor parameter named 'arguments'.
-//CheckStrictMode("var o = { set foo(arguments) {} }", SyntaxError)
+CheckStrictMode("var o = { set foo(arguments) {} }", SyntaxError)
 
 // Duplicate function parameter name.
-//CheckStrictMode("function foo(a, b, c, d, b) {}", SyntaxError)
+CheckStrictMode("function foo(a, b, c, d, b) {}", SyntaxError)
+
+// Function constructor: eval parameter name.
+CheckFunctionConstructorStrictMode("eval")
+
+// Function constructor: arguments parameter name.
+CheckFunctionConstructorStrictMode("arguments")
+
+// Function constructor: duplicate parameter name.
+CheckFunctionConstructorStrictMode("a", "b", "c", "b")
+CheckFunctionConstructorStrictMode("a,b,c,b")
 
 // catch(eval)
 CheckStrictMode("try{}catch(eval){};", SyntaxError)
@@ -103,10 +130,10 @@
 CheckStrictMode("var arguments;", SyntaxError)
 
 // Strict mode applies to the function in which the directive is used..
-//assertThrows('\
-//function foo(eval) {\
-//  "use strict";\
-//}', SyntaxError);
+assertThrows('\
+function foo(eval) {\
+  "use strict";\
+}', SyntaxError);
 
 // Strict mode doesn't affect the outer stop of strict code.
 function NotStrict(eval) {
@@ -129,3 +156,112 @@
     "octal\\032directive";\
     "use strict";\
   }', SyntaxError);
+
+// Duplicate data properties.
+CheckStrictMode("var x = { dupe : 1, nondupe: 3, dupe : 2 };", SyntaxError)
+CheckStrictMode("var x = { '1234' : 1, '2345' : 2, '1234' : 3 };", SyntaxError)
+CheckStrictMode("var x = { '1234' : 1, '2345' : 2, 1234 : 3 };", SyntaxError)
+CheckStrictMode("var x = { 3.14 : 1, 2.71 : 2, 3.14 : 3 };", SyntaxError)
+CheckStrictMode("var x = { 3.14 : 1, '3.14' : 2 };", SyntaxError)
+CheckStrictMode("var x = { 123: 1, 123.00000000000000000000000000000000000000000000000000000000000000000001 : 2 }", SyntaxError)
+
+// Non-conflicting data properties.
+function StrictModeNonDuplicate() {
+  "use strict";
+  var x = { 123 : 1, "0123" : 2 };
+  var x = { 123: 1, '123.00000000000000000000000000000000000000000000000000000000000000000001' : 2 }
+}
+
+// Two getters (non-strict)
+assertThrows("var x = { get foo() { }, get foo() { } };", SyntaxError)
+assertThrows("var x = { get foo(){}, get 'foo'(){}};", SyntaxError)
+assertThrows("var x = { get 12(){}, get '12'(){}};", SyntaxError)
+
+// Two setters (non-strict)
+assertThrows("var x = { set foo(v) { }, set foo(v) { } };", SyntaxError)
+assertThrows("var x = { set foo(v) { }, set 'foo'(v) { } };", SyntaxError)
+assertThrows("var x = { set 13(v) { }, set '13'(v) { } };", SyntaxError)
+
+// Setter and data (non-strict)
+assertThrows("var x = { foo: 'data', set foo(v) { } };", SyntaxError)
+assertThrows("var x = { set foo(v) { }, foo: 'data' };", SyntaxError)
+assertThrows("var x = { foo: 'data', set 'foo'(v) { } };", SyntaxError)
+assertThrows("var x = { set foo(v) { }, 'foo': 'data' };", SyntaxError)
+assertThrows("var x = { 'foo': 'data', set foo(v) { } };", SyntaxError)
+assertThrows("var x = { set 'foo'(v) { }, foo: 'data' };", SyntaxError)
+assertThrows("var x = { 'foo': 'data', set 'foo'(v) { } };", SyntaxError)
+assertThrows("var x = { set 'foo'(v) { }, 'foo': 'data' };", SyntaxError)
+assertThrows("var x = { 12: 1, set '12'(v){}};", SyntaxError);
+assertThrows("var x = { 12: 1, set 12(v){}};", SyntaxError);
+assertThrows("var x = { '12': 1, set '12'(v){}};", SyntaxError);
+assertThrows("var x = { '12': 1, set 12(v){}};", SyntaxError);
+
+// Getter and data (non-strict)
+assertThrows("var x = { foo: 'data', get foo() { } };", SyntaxError)
+assertThrows("var x = { get foo() { }, foo: 'data' };", SyntaxError)
+assertThrows("var x = { 'foo': 'data', get foo() { } };", SyntaxError)
+assertThrows("var x = { get 'foo'() { }, 'foo': 'data' };", SyntaxError)
+assertThrows("var x = { '12': 1, get '12'(){}};", SyntaxError);
+assertThrows("var x = { '12': 1, get 12(){}};", SyntaxError);
+
+// Assignment to eval or arguments
+CheckStrictMode("function strict() { eval = undefined; }", SyntaxError)
+CheckStrictMode("function strict() { arguments = undefined; }", SyntaxError)
+CheckStrictMode("function strict() { print(eval = undefined); }", SyntaxError)
+CheckStrictMode("function strict() { print(arguments = undefined); }", SyntaxError)
+CheckStrictMode("function strict() { var x = eval = undefined; }", SyntaxError)
+CheckStrictMode("function strict() { var x = arguments = undefined; }", SyntaxError)
+
+// Compound assignment to eval or arguments
+CheckStrictMode("function strict() { eval *= undefined; }", SyntaxError)
+CheckStrictMode("function strict() { arguments /= undefined; }", SyntaxError)
+CheckStrictMode("function strict() { print(eval %= undefined); }", SyntaxError)
+CheckStrictMode("function strict() { print(arguments %= undefined); }", SyntaxError)
+CheckStrictMode("function strict() { var x = eval += undefined; }", SyntaxError)
+CheckStrictMode("function strict() { var x = arguments -= undefined; }", SyntaxError)
+CheckStrictMode("function strict() { eval <<= undefined; }", SyntaxError)
+CheckStrictMode("function strict() { arguments >>= undefined; }", SyntaxError)
+CheckStrictMode("function strict() { print(eval >>>= undefined); }", SyntaxError)
+CheckStrictMode("function strict() { print(arguments &= undefined); }", SyntaxError)
+CheckStrictMode("function strict() { var x = eval ^= undefined; }", SyntaxError)
+CheckStrictMode("function strict() { var x = arguments |= undefined; }", SyntaxError)
+
+// Postfix increment with eval or arguments
+CheckStrictMode("function strict() { eval++; }", SyntaxError)
+CheckStrictMode("function strict() { arguments++; }", SyntaxError)
+CheckStrictMode("function strict() { print(eval++); }", SyntaxError)
+CheckStrictMode("function strict() { print(arguments++); }", SyntaxError)
+CheckStrictMode("function strict() { var x = eval++; }", SyntaxError)
+CheckStrictMode("function strict() { var x = arguments++; }", SyntaxError)
+
+// Postfix decrement with eval or arguments
+CheckStrictMode("function strict() { eval--; }", SyntaxError)
+CheckStrictMode("function strict() { arguments--; }", SyntaxError)
+CheckStrictMode("function strict() { print(eval--); }", SyntaxError)
+CheckStrictMode("function strict() { print(arguments--); }", SyntaxError)
+CheckStrictMode("function strict() { var x = eval--; }", SyntaxError)
+CheckStrictMode("function strict() { var x = arguments--; }", SyntaxError)
+
+// Prefix increment with eval or arguments
+CheckStrictMode("function strict() { ++eval; }", SyntaxError)
+CheckStrictMode("function strict() { ++arguments; }", SyntaxError)
+CheckStrictMode("function strict() { print(++eval); }", SyntaxError)
+CheckStrictMode("function strict() { print(++arguments); }", SyntaxError)
+CheckStrictMode("function strict() { var x = ++eval; }", SyntaxError)
+CheckStrictMode("function strict() { var x = ++arguments; }", SyntaxError)
+
+// Prefix decrement with eval or arguments
+CheckStrictMode("function strict() { --eval; }", SyntaxError)
+CheckStrictMode("function strict() { --arguments; }", SyntaxError)
+CheckStrictMode("function strict() { print(--eval); }", SyntaxError)
+CheckStrictMode("function strict() { print(--arguments); }", SyntaxError)
+CheckStrictMode("function strict() { var x = --eval; }", SyntaxError)
+CheckStrictMode("function strict() { var x = --arguments; }", SyntaxError)
+
+// Prefix unary operators other than delete, ++, -- are valid in strict mode
+function StrictModeUnaryOperators() {
+  "use strict";
+  var x = [void eval, typeof eval, +eval, -eval, ~eval, !eval];
+  var y = [void arguments, typeof arguments,
+           +arguments, -arguments, ~arguments, !arguments];
+}
\ No newline at end of file