Update V8 to r5425 as required by WebKit r67178

Change-Id: Ic338e7242d33e5a024bd5978f4a5a3a681af4ebd
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index a902fc2..8b21558 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -125,7 +125,7 @@
   __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
   __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
   // Field JSArray::kElementsOffset is initialized later.
-  __ mov(scratch3,  Operand(0));
+  __ mov(scratch3,  Operand(0, RelocInfo::NONE));
   __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
 
   // Calculate the location of the elements array and set elements array member
@@ -311,7 +311,7 @@
   Label argc_one_or_more, argc_two_or_more;
 
   // Check for array construction with zero arguments or one.
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(ne, &argc_one_or_more);
 
   // Handle construction of an empty array.
@@ -513,7 +513,7 @@
   // r1: called object
   __ bind(&non_function_call);
   // Set expected number of arguments to zero (not changing r0).
-  __ mov(r2, Operand(0));
+  __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
           RelocInfo::CODE_TARGET);
@@ -843,7 +843,7 @@
   // r5-r7, cp may be clobbered
 
   // Clear the context before we push it when entering the JS frame.
-  __ mov(cp, Operand(0));
+  __ mov(cp, Operand(0, RelocInfo::NONE));
 
   // Enter an internal frame.
   __ EnterInternalFrame();
@@ -1030,7 +1030,7 @@
   __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
   __ str(r1, MemOperand(r2, -kPointerSize));
   // Clear r1 to indicate a non-function being called.
-  __ mov(r1, Operand(0));
+  __ mov(r1, Operand(0, RelocInfo::NONE));
 
   // 4. Shift arguments and return address one slot down on the stack
   //    (overwriting the original receiver).  Adjust argument count to make
@@ -1060,7 +1060,8 @@
   { Label function;
     __ tst(r1, r1);
     __ b(ne, &function);
-    __ mov(r2, Operand(0));  // expected arguments is 0 for CALL_NON_FUNCTION
+    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+    __ mov(r2, Operand(0, RelocInfo::NONE));
     __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
     __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
                          RelocInfo::CODE_TARGET);
@@ -1123,7 +1124,7 @@
   // Push current limit and index.
   __ bind(&okay);
   __ push(r0);  // limit
-  __ mov(r1, Operand(0));  // initial index
+  __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
   __ push(r1);
 
   // Change context eagerly to get the right global object if necessary.
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index f75ee8b..fa93030 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -296,7 +296,7 @@
   STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
   // Subtract from 0 if source was negative.
-  __ rsb(source_, source_, Operand(0), LeaveCC, ne);
+  __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
 
   // We have -1, 0 or 1, which we treat specially. Register source_ contains
   // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -309,7 +309,7 @@
       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
   __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
   // 1, 0 and -1 all have 0 for the second word.
-  __ mov(mantissa, Operand(0));
+  __ mov(mantissa, Operand(0, RelocInfo::NONE));
   __ Ret();
 
   __ bind(&not_special);
@@ -357,7 +357,7 @@
   // Set the sign bit in scratch_ if the value was negative.
   __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
   // Subtract from 0 if the value was negative.
-  __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
+  __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
   // We should be masking the implict first digit of the mantissa away here,
   // but it just ends up combining harmlessly with the last digit of the
   // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
@@ -380,7 +380,7 @@
   non_smi_exponent += 1 << HeapNumber::kExponentShift;
   __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
   __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
-  __ mov(ip, Operand(0));
+  __ mov(ip, Operand(0, RelocInfo::NONE));
   __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
   __ Ret();
 }
@@ -604,7 +604,7 @@
          Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
          SetCC);
   __ b(ne, &one_is_nan);
-  __ cmp(lhs_mantissa, Operand(0));
+  __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
   __ b(ne, &one_is_nan);
 
   __ bind(lhs_not_nan);
@@ -619,7 +619,7 @@
          Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
          SetCC);
   __ b(ne, &one_is_nan);
-  __ cmp(rhs_mantissa, Operand(0));
+  __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
   __ b(eq, &neither_is_nan);
 
   __ bind(&one_is_nan);
@@ -1085,8 +1085,8 @@
   // "tos_" is a register, and contains a non zero value by default.
   // Hence we only need to overwrite "tos_" with zero to return false for
   // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
-  __ mov(tos_, Operand(0), LeaveCC, eq);  // for FP_ZERO
-  __ mov(tos_, Operand(0), LeaveCC, vs);  // for FP_NAN
+  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO
+  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN
   __ Ret();
 
   __ bind(&not_heap_number);
@@ -1131,7 +1131,7 @@
 
   // Return 0 in "tos_" for false .
   __ bind(&false_result);
-  __ mov(tos_, Operand(0));
+  __ mov(tos_, Operand(0, RelocInfo::NONE));
   __ Ret();
 }
 
@@ -1463,95 +1463,6 @@
 }
 
 
-// Tries to get a signed int32 out of a double precision floating point heap
-// number.  Rounds towards 0.  Fastest for doubles that are in the ranges
-// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff.  This corresponds
-// almost to the range of signed int32 values that are not Smis.  Jumps to the
-// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
-// (excluding the endpoints).
-static void GetInt32(MacroAssembler* masm,
-                     Register source,
-                     Register dest,
-                     Register scratch,
-                     Register scratch2,
-                     Label* slow) {
-  Label right_exponent, done;
-  // Get exponent word.
-  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
-  // Get exponent alone in scratch2.
-  __ Ubfx(scratch2,
-          scratch,
-          HeapNumber::kExponentShift,
-          HeapNumber::kExponentBits);
-  // Load dest with zero.  We use this either for the final shift or
-  // for the answer.
-  __ mov(dest, Operand(0));
-  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
-  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
-  // the exponent that we are fastest at and also the highest exponent we can
-  // handle here.
-  const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
-  // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
-  // split it up to avoid a constant pool entry.  You can't do that in general
-  // for cmp because of the overflow flag, but we know the exponent is in the
-  // range 0-2047 so there is no overflow.
-  int fudge_factor = 0x400;
-  __ sub(scratch2, scratch2, Operand(fudge_factor));
-  __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
-  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
-  __ b(eq, &right_exponent);
-  // If the exponent is higher than that then go to slow case.  This catches
-  // numbers that don't fit in a signed int32, infinities and NaNs.
-  __ b(gt, slow);
-
-  // We know the exponent is smaller than 30 (biased).  If it is less than
-  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
-  // it rounds to zero.
-  const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
-  __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
-  // Dest already has a Smi zero.
-  __ b(lt, &done);
-  if (!CpuFeatures::IsSupported(VFP3)) {
-    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
-    // get how much to shift down.
-    __ rsb(dest, scratch2, Operand(30));
-  }
-  __ bind(&right_exponent);
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-    // ARMv7 VFP3 instructions implementing double precision to integer
-    // conversion using round to zero.
-    __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
-    __ vmov(d7, scratch2, scratch);
-    __ vcvt_s32_f64(s15, d7);
-    __ vmov(dest, s15);
-  } else {
-    // Get the top bits of the mantissa.
-    __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
-    // Put back the implicit 1.
-    __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
-    // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We just orred in the implicit bit so that took care of one and
-    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
-    // distance.
-    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-    __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
-    // Put sign in zero flag.
-    __ tst(scratch, Operand(HeapNumber::kSignMask));
-    // Get the second half of the double. For some exponents we don't
-    // actually need this because the bits get shifted out again, but
-    // it's probably slower to test than just to do it.
-    __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
-    // Shift down 22 bits to get the last 10 bits.
-    __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
-    // Move down according to the exponent.
-    __ mov(dest, Operand(scratch, LSR, dest));
-    // Fix sign if sign bit was set.
-    __ rsb(dest, dest, Operand(0), LeaveCC, ne);
-  }
-  __ bind(&done);
-}
-
 // For bitwise ops where the inputs are not both Smis we here try to determine
 // whether both inputs are either Smis or at least heap numbers that can be
 // represented by a 32 bit signed value.  We truncate towards zero as required
@@ -1574,7 +1485,7 @@
   __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  GetInt32(masm, lhs, r3, r5, r4, &slow);
+  __ ConvertToInt32(lhs, r3, r5, r4, &slow);
   __ jmp(&done_checking_lhs);
   __ bind(&lhs_is_smi);
   __ mov(r3, Operand(lhs, ASR, 1));
@@ -1585,7 +1496,7 @@
   __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  GetInt32(masm, rhs, r2, r5, r4, &slow);
+  __ ConvertToInt32(rhs, r2, r5, r4, &slow);
   __ jmp(&done_checking_rhs);
   __ bind(&rhs_is_smi);
   __ mov(r2, Operand(rhs, ASR, 1));
@@ -2320,7 +2231,7 @@
     __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
     // r0 points to the cache for the type type_.
     // If NULL, the cache hasn't been initialized yet, so go through runtime.
-    __ cmp(r0, Operand(0));
+    __ cmp(r0, Operand(0, RelocInfo::NONE));
     __ b(eq, &runtime_call);
 
 #ifdef DEBUG
@@ -2400,12 +2311,12 @@
       // smi while we are at it.
       __ bic(ip, r0, Operand(0x80000000), SetCC);
       __ b(eq, &slow);
-      __ rsb(r0, r0, Operand(0));
+      __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
       __ StubReturn(1);
     } else {
       // The value of the expression is a smi and 0 is OK for -0.  Try
       // optimistic subtraction '0 - value'.
-      __ rsb(r0, r0, Operand(0), SetCC);
+      __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
       __ StubReturn(1, vc);
       // We don't have to reverse the optimistic neg since the only case
       // where we fall through is the minimum negative Smi, which is the case
@@ -2440,7 +2351,7 @@
     __ b(ne, &slow);
 
     // Convert the heap number is r0 to an untagged integer in r1.
-    GetInt32(masm, r0, r1, r2, r3, &slow);
+    __ ConvertToInt32(r0, r1, r2, r3, &slow);
 
     // Do the bitwise operation (move negated) and check if the result
     // fits in a smi.
@@ -2518,9 +2429,9 @@
   // Before returning we restore the context from the frame pointer if
   // not NULL.  The frame pointer is NULL in the exception handler of a
   // JS entry frame.
-  __ cmp(fp, Operand(0));
+  __ cmp(fp, Operand(0, RelocInfo::NONE));
   // Set cp to NULL if fp is NULL.
-  __ mov(cp, Operand(0), LeaveCC, eq);
+  __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
   // Restore cp otherwise.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
 #ifdef DEBUG
@@ -2586,9 +2497,9 @@
   // Before returning we restore the context from the frame pointer if
   // not NULL.  The frame pointer is NULL in the exception handler of a
   // JS entry frame.
-  __ cmp(fp, Operand(0));
+  __ cmp(fp, Operand(0, RelocInfo::NONE));
   // Set cp to NULL if fp is NULL.
-  __ mov(cp, Operand(0), LeaveCC, eq);
+  __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
   // Restore cp otherwise.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
 #ifdef DEBUG
@@ -2656,7 +2567,7 @@
       (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
   if (alignment_before_call > 0) {
     // Push until the alignment before the call is met.
-    __ mov(r2, Operand(0));
+    __ mov(r2, Operand(0, RelocInfo::NONE));
     for (int i = alignment_before_call;
         (i & frame_alignment_mask) != 0;
         i += kPointerSize) {
@@ -3080,7 +2991,7 @@
   // of the arguments object and the elements array in words.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ cmp(r1, Operand(0));
+  __ cmp(r1, Operand(0, RelocInfo::NONE));
   __ b(eq, &add_arguments_object);
   __ mov(r1, Operand(r1, LSR, kSmiTagSize));
   __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
@@ -3117,7 +3028,7 @@
 
   // If there are no actual arguments, we're done.
   Label done;
-  __ cmp(r1, Operand(0));
+  __ cmp(r1, Operand(0, RelocInfo::NONE));
   __ b(eq, &done);
 
   // Get the parameters pointer from the stack.
@@ -3143,7 +3054,7 @@
   // Post-increment r4 with kPointerSize on each iteration.
   __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
   __ sub(r1, r1, Operand(1));
-  __ cmp(r1, Operand(0));
+  __ cmp(r1, Operand(0, RelocInfo::NONE));
   __ b(ne, &loop);
 
   // Return and remove the on-stack parameters.
@@ -3541,7 +3452,7 @@
   // of the original receiver from the call site).
   __ str(r1, MemOperand(sp, argc_ * kPointerSize));
   __ mov(r0, Operand(argc_));  // Setup the number of arguments.
-  __ mov(r2, Operand(0));
+  __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
   __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
           RelocInfo::CODE_TARGET);
@@ -3883,7 +3794,7 @@
   if (!ascii) {
     __ add(count, count, Operand(count), SetCC);
   } else {
-    __ cmp(count, Operand(0));
+    __ cmp(count, Operand(0, RelocInfo::NONE));
   }
   __ b(eq, &done);
 
@@ -3938,7 +3849,7 @@
   if (!ascii) {
     __ add(count, count, Operand(count), SetCC);
   } else {
-    __ cmp(count, Operand(0));
+    __ cmp(count, Operand(0, RelocInfo::NONE));
   }
   __ b(eq, &done);
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 08a8da0..f985fb4 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -770,7 +770,7 @@
       ToBooleanStub stub(tos);
       frame_->CallStub(&stub, 0);
       // Convert the result in "tos" to a condition code.
-      __ cmp(tos, Operand(0));
+      __ cmp(tos, Operand(0, RelocInfo::NONE));
     } else {
       // Implements slow case by calling the runtime.
       frame_->EmitPush(tos);
@@ -917,16 +917,55 @@
   }
 
   virtual void Generate();
+  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
+  // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty
+  // methods, so it is the responsibility of the deferred code to save and
+  // restore registers.
+  virtual bool AutoSaveAndRestore() { return false; }
+
+  void JumpToNonSmiInput(Condition cond);
+  void JumpToAnswerOutOfRange(Condition cond);
 
  private:
+  void GenerateNonSmiInput();
+  void GenerateAnswerOutOfRange();
+  void WriteNonSmiAnswer(Register answer,
+                         Register heap_number,
+                         Register scratch);
+
   Token::Value op_;
   int value_;
   bool reversed_;
   OverwriteMode overwrite_mode_;
   Register tos_register_;
+  Label non_smi_input_;
+  Label answer_out_of_range_;
 };
 
 
+// For bit operations we try harder and handle the case where the input is not
+// a Smi but a 32-bit integer without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
+  ASSERT(Token::IsBitOp(op_));
+
+  __ b(cond, &non_smi_input_);
+}
+
+
+// For bit operations the result is always 32 bits so we handle the case where
+// the result does not fit in a Smi without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
+  ASSERT(Token::IsBitOp(op_));
+
+  if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
+    // >>> requires an unsigned-to-double conversion and the non-VFP code
+    // does not support this conversion.
+    __ b(cond, entry_label());
+  } else {
+    __ b(cond, &answer_out_of_range_);
+  }
+}
+
 
 // On entry the non-constant side of the binary operation is in tos_register_
 // and the constant smi side is nowhere.  The tos_register_ is not used by the
@@ -1005,6 +1044,172 @@
   // came into this function with, so we can merge back to that frame
   // without trashing it.
   copied_frame.MergeTo(frame_state()->frame());
+
+  Exit();
+
+  if (non_smi_input_.is_linked()) {
+    GenerateNonSmiInput();
+  }
+
+  if (answer_out_of_range_.is_linked()) {
+    GenerateAnswerOutOfRange();
+  }
+}
+
+
+// Convert and write the integer answer into heap_number.
+void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
+                                                   Register heap_number,
+                                                   Register scratch) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, answer);
+    if (op_ == Token::SHR) {
+      __ vcvt_f64_u32(d0, s0);
+    } else {
+      __ vcvt_f64_s32(d0, s0);
+    }
+    __ sub(scratch, heap_number, Operand(kHeapObjectTag));
+    __ vstr(d0, scratch, HeapNumber::kValueOffset);
+  } else {
+    WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
+    __ CallStub(&stub);
+  }
+}
+
+
+void DeferredInlineSmiOperation::GenerateNonSmiInput() {
+  // We know the left hand side is not a Smi and the right hand side is an
+  // immediate value (value_) which can be represented as a Smi. We only
+  // handle bit operations.
+  ASSERT(Token::IsBitOp(op_));
+
+  if (FLAG_debug_code) {
+    __ Abort("Should not fall through!");
+  }
+
+  __ bind(&non_smi_input_);
+  if (FLAG_debug_code) {
+    __ AbortIfSmi(tos_register_);
+  }
+
+  // This routine uses the registers from r2 to r6.  At the moment they are
+  // not used by the register allocator, but when they are, it should use
+  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
+
+  Register heap_number_map = r7;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
+  __ cmp(r3, heap_number_map);
+  // Not a number, fall back to the GenericBinaryOpStub.
+  __ b(ne, entry_label());
+
+  Register int32 = r2;
+  // Not a 32-bit signed int; fall back to the GenericBinaryOpStub.
+  __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
+
+  // tos_register_ (r0 or r1): Original heap number.
+  // int32: signed 32-bit int.
+
+  Label result_not_a_smi;
+  int shift_value = value_ & 0x1f;
+  switch (op_) {
+    case Token::BIT_OR:  __ orr(int32, int32, Operand(value_)); break;
+    case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
+    case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
+    case Token::SAR:
+      ASSERT(!reversed_);
+      if (shift_value != 0) {
+         __ mov(int32, Operand(int32, ASR, shift_value));
+      }
+      break;
+    case Token::SHR:
+      ASSERT(!reversed_);
+      if (shift_value != 0) {
+        __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
+      } else {
+        // SHR is special because it is required to produce a positive answer.
+        __ cmp(int32, Operand(0, RelocInfo::NONE));
+      }
+      if (CpuFeatures::IsSupported(VFP3)) {
+        __ b(mi, &result_not_a_smi);
+      } else {
+        // Non-VFP code cannot convert from unsigned to double, so fall back
+        // to the GenericBinaryOpStub.
+        __ b(mi, entry_label());
+      }
+      break;
+    case Token::SHL:
+      ASSERT(!reversed_);
+      if (shift_value != 0) {
+        __ mov(int32, Operand(int32, LSL, shift_value));
+      }
+      break;
+    default: UNREACHABLE();
+  }
+  // Check that the *signed* result fits in a Smi. Not necessary for AND, for
+  // SAR if the shift is more than 0, or for SHR if the shift is more than 1.
+  if (!( (op_ == Token::AND) ||
+        ((op_ == Token::SAR) && (shift_value > 0)) ||
+        ((op_ == Token::SHR) && (shift_value > 1)))) {
+    __ add(r3, int32, Operand(0x40000000), SetCC);
+    __ b(mi, &result_not_a_smi);
+  }
+  __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
+  Exit();
+
+  if (result_not_a_smi.is_linked()) {
+    __ bind(&result_not_a_smi);
+    if (overwrite_mode_ != OVERWRITE_LEFT) {
+      ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
+             (overwrite_mode_ == OVERWRITE_RIGHT));
+      // If the allocation fails, fall back to the GenericBinaryOpStub.
+      __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
+      // Nothing can go wrong now, so overwrite tos.
+      __ mov(tos_register_, Operand(r4));
+    }
+
+    // int32: answer as signed 32-bit integer.
+    // tos_register_: Heap number to write the answer into.
+    WriteNonSmiAnswer(int32, tos_register_, r3);
+
+    Exit();
+  }
+}
+
+
+void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
+  // The inputs from a bitwise operation were Smis but the result cannot fit
+  // into a Smi, so we store it in a heap number. tos_register_ holds the
+  // result to be converted.
+  ASSERT(Token::IsBitOp(op_));
+  ASSERT(!reversed_);
+
+  if (FLAG_debug_code) {
+    __ Abort("Should not fall through!");
+  }
+
+  __ bind(&answer_out_of_range_);
+  if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
+    // >>> 0 is a special case where the result is already tagged but wrong
+    // because the Smi is negative. We untag it.
+    __ mov(tos_register_, Operand(tos_register_, ASR, kSmiTagSize));
+  }
+
+  // This routine uses the registers from r2 to r6.  At the moment they are
+  // not used by the register allocator, but when they are, it should use
+  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
+
+  // Allocate the result heap number.
+  Register heap_number_map = r7;
+  Register heap_number = r4;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  // If the allocation fails, fall back to the GenericBinaryOpStub.
+  __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
+  WriteNonSmiAnswer(tos_register_, heap_number, r3);
+  __ mov(tos_register_, Operand(heap_number));
+
+  Exit();
 }
 
 
@@ -1191,10 +1396,10 @@
         }
         frame_->EmitPush(tos, TypeInfo::Smi());
       } else {
-        DeferredCode* deferred =
+        DeferredInlineSmiOperation* deferred =
           new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
         __ tst(tos, Operand(kSmiTagMask));
-        deferred->Branch(ne);
+        deferred->JumpToNonSmiInput(ne);
         switch (op) {
           case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
           case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
@@ -1240,17 +1445,17 @@
     case Token::SHR:
     case Token::SAR: {
       ASSERT(!reversed);
-      int shift_amount = int_value & 0x1f;
+      int shift_value = int_value & 0x1f;
       TypeInfo result = TypeInfo::Number();
 
       if (op == Token::SHR) {
-        if (shift_amount > 1) {
+        if (shift_value > 1) {
           result = TypeInfo::Smi();
-        } else if (shift_amount > 0) {
+        } else if (shift_value > 0) {
           result = TypeInfo::Integer32();
         }
       } else if (op == Token::SAR) {
-        if (shift_amount > 0) {
+        if (shift_value > 0) {
           result = TypeInfo::Smi();
         } else {
           result = TypeInfo::Integer32();
@@ -1260,77 +1465,67 @@
         result = TypeInfo::Integer32();
       }
 
-      Register scratch = VirtualFrame::scratch0();
-      Register scratch2 = VirtualFrame::scratch1();
-      int shift_value = int_value & 0x1f;  // least significant 5 bits
-      DeferredCode* deferred =
+      DeferredInlineSmiOperation* deferred =
         new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
-      uint32_t problematic_mask = kSmiTagMask;
-      // For unsigned shift by zero all negative smis are problematic.
-      bool skip_smi_test = both_sides_are_smi;
-      if (shift_value == 0 && op == Token::SHR) {
-        problematic_mask |= 0x80000000;
-        skip_smi_test = false;
-      }
-      if (!skip_smi_test) {
-        __ tst(tos, Operand(problematic_mask));
-        deferred->Branch(ne);  // Go slow for problematic input.
+      if (!both_sides_are_smi) {
+        __ tst(tos, Operand(kSmiTagMask));
+        deferred->JumpToNonSmiInput(ne);
       }
       switch (op) {
         case Token::SHL: {
           if (shift_value != 0) {
+            Register scratch = VirtualFrame::scratch0();
             int adjusted_shift = shift_value - kSmiTagSize;
             ASSERT(adjusted_shift >= 0);
+
             if (adjusted_shift != 0) {
-              __ mov(scratch, Operand(tos, LSL, adjusted_shift));
-              // Check that the *signed* result fits in a smi.
-              __ add(scratch2, scratch, Operand(0x40000000), SetCC);
-              deferred->Branch(mi);
-              __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
-            } else {
-              // Check that the *signed* result fits in a smi.
-              __ add(scratch2, tos, Operand(0x40000000), SetCC);
-              deferred->Branch(mi);
-              __ mov(tos, Operand(tos, LSL, kSmiTagSize));
+              __ mov(tos, Operand(tos, LSL, adjusted_shift));
             }
+            // Check that the *signed* result fits in a smi.
+            __ add(scratch, tos, Operand(0x40000000), SetCC);
+            deferred->JumpToAnswerOutOfRange(mi);
+            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
           }
           break;
         }
         case Token::SHR: {
           if (shift_value != 0) {
+            Register scratch = VirtualFrame::scratch0();
             __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Remove tag.
-            // LSR by immediate 0 means shifting 32 bits.
-            __ mov(scratch, Operand(scratch, LSR, shift_value));
+            __ mov(tos, Operand(scratch, LSR, shift_value));
             if (shift_value == 1) {
-              // check that the *unsigned* result fits in a smi
-              // neither of the two high-order bits can be set:
+              // Check that the *unsigned* result fits in a smi.
+              // Neither of the two high-order bits can be set:
               // - 0x80000000: high bit would be lost when smi tagging
-              // - 0x40000000: this number would convert to negative when
-              // smi tagging these two cases can only happen with shifts
-              // by 0 or 1 when handed a valid smi
-              __ tst(scratch, Operand(0xc0000000));
-              deferred->Branch(ne);
-            } else {
-              ASSERT(shift_value >= 2);
-              result = TypeInfo::Smi();  // SHR by at least 2 gives a Smi.
+              // - 0x40000000: this number would become negative after Smi
+              //   tagging.
+              // These two cases can only happen with shifts by 0 or 1 when
+              // handed a valid smi.
+              __ tst(tos, Operand(0xc0000000));
+              if (!CpuFeatures::IsSupported(VFP3)) {
+                // If the unsigned result does not fit in a Smi, we require an
+                // unsigned-to-double conversion. Without VFP, V8 has to fall
+                // back to the runtime. The deferred code will expect tos
+                // to hold the original Smi to be shifted.
+                __ mov(tos, Operand(scratch, LSL, kSmiTagSize), LeaveCC, ne);
+              }
+              deferred->JumpToAnswerOutOfRange(ne);
             }
-            __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
+            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
+          } else {
+            __ cmp(tos, Operand(0, RelocInfo::NONE));
+            deferred->JumpToAnswerOutOfRange(mi);
           }
           break;
         }
         case Token::SAR: {
-          // In the ARM instructions set, ASR by immediate 0 means shifting 32
-          // bits.
           if (shift_value != 0) {
-            // Do the shift and the tag removal in one operation.  If the shift
+            // Do the shift and the tag removal in one operation. If the shift
             // is 31 bits (the highest possible value) then we emit the
-            // instruction as a shift by 0 which means shift arithmetically by
-            // 32.
+            // instruction as a shift by 0 which in the ARM ISA means shift
+            // arithmetically by 32.
             __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
-            // Put tag back.
             __ mov(tos, Operand(tos, LSL, kSmiTagSize));
-            // SAR by at least 1 gives a Smi.
-            result = TypeInfo::Smi();
           }
           break;
         }
@@ -1458,7 +1653,7 @@
     // We call with 0 args because there are 0 on the stack.
     CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs);
     frame_->CallStub(&stub, 0);
-    __ cmp(r0, Operand(0));
+    __ cmp(r0, Operand(0, RelocInfo::NONE));
     exit.Jump();
 
     smi.Bind();
@@ -1622,7 +1817,7 @@
   // frame.
   Label loop;
   // r3 is a small non-negative integer, due to the test above.
-  __ cmp(r3, Operand(0));
+  __ cmp(r3, Operand(0, RelocInfo::NONE));
   __ b(eq, &invoke);
   // Compute the address of the first argument.
   __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
@@ -1780,7 +1975,7 @@
     } else if (node->fun() != NULL) {
       Load(node->fun());
     } else {
-      frame_->EmitPush(Operand(0));
+      frame_->EmitPush(Operand(0, RelocInfo::NONE));
     }
 
     frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
@@ -4417,7 +4612,8 @@
     // Get the absolute untagged value of the exponent and use that for the
     // calculation.
     __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
-    __ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi);  // Negate if negative.
+    // Negate if negative.
+    __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
     __ vmov(d2, d0, mi);  // 1.0 needed in d2 later if exponent is negative.
 
     // Run through all the bits in the exponent. The result is calculated in d0
@@ -4430,14 +4626,14 @@
     __ b(ne, &more_bits);
 
     // If exponent is positive we are done.
-    __ cmp(exponent, Operand(0));
+    __ cmp(exponent, Operand(0, RelocInfo::NONE));
     __ b(ge, &allocate_return);
 
     // If exponent is negative result is 1/result (d2 already holds 1.0 in that
     // case). However if d0 has reached infinity this will not provide the
     // correct result, so call runtime if that is the case.
     __ mov(scratch2, Operand(0x7FF00000));
-    __ mov(scratch1, Operand(0));
+    __ mov(scratch1, Operand(0, RelocInfo::NONE));
     __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
     __ vcmp(d0, d1);
     __ vmrs(pc);
@@ -4940,7 +5136,7 @@
     __ jmp(exit_label());
     __ bind(&false_result);
     // Set false result.
-    __ mov(map_result_, Operand(0));
+    __ mov(map_result_, Operand(0, RelocInfo::NONE));
   }
 
  private:
@@ -5114,7 +5310,7 @@
     // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
     __ vmov(d7, r0, r1);
     // Move 0x4130000000000000 to VFP.
-    __ mov(r0, Operand(0));
+    __ mov(r0, Operand(0, RelocInfo::NONE));
     __ vmov(d8, r0, r1);
     // Subtract and store the result in the heap number.
     __ vsub(d7, d7, d8);
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index c522154..162d97f 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -455,9 +455,6 @@
 
   static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
-  static bool PatchInlineRuntimeEntry(Handle<String> name,
-                                      const InlineRuntimeLUT& new_entry,
-                                      InlineRuntimeLUT* old_entry);
 
   static Handle<Code> ComputeLazyCompile(int argc);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 82f93b6..8128f7d 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -158,7 +158,7 @@
 #ifdef DEBUG
   __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ mov(r0, Operand(0));  // no arguments
+  __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
   __ mov(r1, Operand(ExternalReference::debug_break()));
 
   CEntryStub ceb(1);
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 912fefc..f32da6d 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -688,7 +688,7 @@
 
     CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0);
     __ CallStub(&stub);
-    __ cmp(r0, Operand(0));
+    __ cmp(r0, Operand(0, RelocInfo::NONE));
     __ b(ne, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
     __ b(clause->body_target()->entry_label());
@@ -2132,7 +2132,7 @@
     // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
     __ vmov(d7, r0, r1);
     // Move 0x4130000000000000 to VFP.
-    __ mov(r0, Operand(0));
+    __ mov(r0, Operand(0, RelocInfo::NONE));
     __ vmov(d8, r0, r1);
     // Subtract and store the result in the heap number.
     __ vsub(d7, d7, d8);
@@ -3125,7 +3125,7 @@
 
       CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0);
       __ CallStub(&stub);
-      __ cmp(r0, Operand(0));
+      __ cmp(r0, Operand(0, RelocInfo::NONE));
       Split(cc, if_true, if_false, fall_through);
     }
   }
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 49d7b2d..1a76db2 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1292,7 +1292,7 @@
     __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
     __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
   } else {
-    __ mov(loword, Operand(0));
+    __ mov(loword, Operand(0, RelocInfo::NONE));
     __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
   }
 
@@ -1790,7 +1790,7 @@
 
     __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
     // Negate value if it is negative.
-    __ rsb(ival, ival, Operand(0), LeaveCC, ne);
+    __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
 
     // We have -1, 0 or 1, which we treat specially. Register ival contains
     // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -2075,18 +2075,18 @@
       // and infinities. All these should be converted to 0.
       __ mov(r7, Operand(HeapNumber::kExponentMask));
       __ and_(r9, r5, Operand(r7), SetCC);
-      __ mov(r5, Operand(0), LeaveCC, eq);
+      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
       __ b(eq, &done);
 
       __ teq(r9, Operand(r7));
-      __ mov(r5, Operand(0), LeaveCC, eq);
+      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
       __ b(eq, &done);
 
       // Unbias exponent.
       __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
       __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
       // If exponent is negative than result is 0.
-      __ mov(r5, Operand(0), LeaveCC, mi);
+      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
       __ b(mi, &done);
 
       // If exponent is too big than result is minimal value.
@@ -2102,14 +2102,14 @@
       __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
       __ b(pl, &sign);
 
-      __ rsb(r9, r9, Operand(0));
+      __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
       __ mov(r5, Operand(r5, LSL, r9));
       __ rsb(r9, r9, Operand(meaningfull_bits));
       __ orr(r5, r5, Operand(r6, LSR, r9));
 
       __ bind(&sign);
-      __ teq(r7, Operand(0));
-      __ rsb(r5, r5, Operand(0), LeaveCC, ne);
+      __ teq(r7, Operand(0, RelocInfo::NONE));
+      __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
 
       __ bind(&done);
       switch (array_type) {
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 0b6e7b3..3554431 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include <limits.h>  // For LONG_MIN, LONG_MAX.
+
 #include "v8.h"
 
 #if defined(V8_TARGET_ARCH_ARM)
@@ -224,7 +226,7 @@
   }
   int32_t immediate = src2.immediate();
   if (immediate == 0) {
-    mov(dst, Operand(0), LeaveCC, cond);
+    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
     return;
   }
   if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) {
@@ -303,7 +305,7 @@
     }
     tst(dst, Operand(~satval));
     b(eq, &done);
-    mov(dst, Operand(0), LeaveCC, mi);  // 0 if negative.
+    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi);  // 0 if negative.
     mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
     bind(&done);
   } else {
@@ -592,7 +594,7 @@
 
 void MacroAssembler::LeaveExitFrame() {
   // Clear top frame.
-  mov(r3, Operand(0));
+  mov(r3, Operand(0, RelocInfo::NONE));
   mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
   str(r3, MemOperand(ip));
 
@@ -761,7 +763,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
   ASSERT(allow_stub_calls());
-  mov(r0, Operand(0));
+  mov(r0, Operand(0, RelocInfo::NONE));
   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
   CEntryStub ces(1);
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
@@ -797,7 +799,7 @@
     // The frame pointer does not point to a JS frame so we save NULL
     // for fp. We expect the code throwing an exception to check fp
     // before dereferencing it to restore the context.
-    mov(ip, Operand(0));  // To save a NULL frame pointer.
+    mov(ip, Operand(0, RelocInfo::NONE));  // To save a NULL frame pointer.
     mov(r6, Operand(StackHandler::ENTRY));
     ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
            && StackHandlerConstants::kFPOffset == 2 * kPointerSize
@@ -836,7 +838,7 @@
   ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
   // In debug mode, make sure the lexical context is set.
 #ifdef DEBUG
-  cmp(scratch, Operand(0));
+  cmp(scratch, Operand(0, RelocInfo::NONE));
   Check(ne, "we should not have an empty lexical context");
 #endif
 
@@ -1333,6 +1335,104 @@
 }
 
 
+// Tries to get a signed int32 out of a double-precision floating-point heap
+// number. Rounds towards 0. Branches to 'not_int32' if the double is out of
+// the 32-bit signed integer range.
+void MacroAssembler::ConvertToInt32(Register source,
+                                    Register dest,
+                                    Register scratch,
+                                    Register scratch2,
+                                    Label *not_int32) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    sub(scratch, source, Operand(kHeapObjectTag));
+    vldr(d0, scratch, HeapNumber::kValueOffset);
+    vcvt_s32_f64(s0, d0);
+    vmov(dest, s0);
+    // The signed vcvt instruction will saturate to the minimum (0x80000000)
+    // or maximum (0x7fffffff) signed 32-bit integer when the double is out
+    // of range. When subtracting one, the minimum signed integer becomes the
+    // maximum signed integer.
+    sub(scratch, dest, Operand(1));
+    cmp(scratch, Operand(LONG_MAX - 1));
+    // If equal then dest was LONG_MAX; if greater, dest was LONG_MIN.
+    b(ge, not_int32);
+  } else {
+    // This code is faster for doubles that are in the ranges -0x7fffffff to
+    // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
+    // the range of signed int32 values that are not Smis.  Jumps to the label
+    // 'not_int32' if the double isn't in the range -0x80000000.0 to
+    // 0x80000000.0 (excluding the endpoints).
+    Label right_exponent, done;
+    // Get exponent word.
+    ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+    // Get exponent alone in scratch2.
+    Ubfx(scratch2,
+         scratch,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+    // Load dest with zero.  We use this either for the final shift or
+    // for the answer.
+    mov(dest, Operand(0, RelocInfo::NONE));
+    // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+    // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
+    // the exponent that we are fastest at and also the highest exponent we can
+    // handle here.
+    const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
+    // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
+    // split it up to avoid a constant pool entry.  You can't do that in general
+    // for cmp because of the overflow flag, but we know the exponent is in the
+    // range 0-2047 so there is no overflow.
+    int fudge_factor = 0x400;
+    sub(scratch2, scratch2, Operand(fudge_factor));
+    cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
+    // If we have a match of the int32-but-not-Smi exponent then skip some
+    // logic.
+    b(eq, &right_exponent);
+    // If the exponent is higher than that then go to slow case.  This catches
+    // numbers that don't fit in a signed int32, infinities and NaNs.
+    b(gt, not_int32);
+
+    // We know the exponent is smaller than 30 (biased).  If it is less than
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+    // it rounds to zero.
+    const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
+    sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
+    // Dest already has a Smi zero.
+    b(lt, &done);
+
+    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
+    // get how much to shift down.
+    rsb(dest, scratch2, Operand(30));
+
+    bind(&right_exponent);
+    // Get the top bits of the mantissa.
+    and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+    // Put back the implicit 1.
+    orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+    // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We just orred in the implicit bit so that took care of one and
+    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+    // distance.
+    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+    mov(scratch2, Operand(scratch2, LSL, shift_distance));
+    // Put sign in zero flag.
+    tst(scratch, Operand(HeapNumber::kSignMask));
+    // Get the second half of the double. For some exponents we don't
+    // actually need this because the bits get shifted out again, but
+    // it's probably slower to test than just to do it.
+    ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+    // Shift down 22 bits to get the last 10 bits.
+    orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+    // Move down according to the exponent.
+    mov(dest, Operand(scratch, LSR, dest));
+    // Fix sign if sign bit was set.
+    rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+    bind(&done);
+  }
+}
+
+
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
@@ -1718,7 +1818,7 @@
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
   clz(zeros, source);  // This instruction is only supported after ARM5.
 #else
-  mov(zeros, Operand(0));
+  mov(zeros, Operand(0, RelocInfo::NONE));
   Move(scratch, source);
   // Top 16.
   tst(scratch, Operand(0xffff0000));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 207ee5c..febd87e 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -504,6 +504,15 @@
                               Register scratch1,
                               SwVfpRegister scratch2);
 
+  // Convert the HeapNumber pointed to by source to a 32-bit signed integer in
+  // dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
+  // to the not_int32 label.
+  void ConvertToInt32(Register source,
+                      Register dest,
+                      Register scratch,
+                      Register scratch2,
+                      Label *not_int32);
+
   // Count leading zeros in a 32 bit word.  On ARM5 and later it uses the clz
   // instruction.  On pre-ARM5 hardware this routine gives the wrong answer
   // for 0 (31 instead of 32).  Source and scratch can be the same in which case
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 72b635f..8f45886 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -189,7 +189,7 @@
   Label not_at_start;
   // Did we start the match at the start of the string at all?
   __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   BranchOrBacktrack(eq, &not_at_start);
 
   // If we did, are we still at the start of the input?
@@ -204,7 +204,7 @@
 void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
   __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   BranchOrBacktrack(eq, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -364,7 +364,7 @@
     __ CallCFunction(function, argument_count);
 
     // Check if function returned non-zero for success or zero for failure.
-    __ cmp(r0, Operand(0));
+    __ cmp(r0, Operand(0, RelocInfo::NONE));
     BranchOrBacktrack(eq, on_no_match);
     // On success, increment position by length of capture.
     __ add(current_input_offset(), current_input_offset(), Operand(r4));
@@ -634,7 +634,7 @@
 
   __ bind(&stack_limit_hit);
   CallCheckStackGuardState(r0);
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   // If returned value is non-zero, we exit with the returned value as result.
   __ b(ne, &exit_label_);
 
@@ -661,7 +661,7 @@
   // string, and store that value in a local variable.
   __ tst(r1, Operand(r1));
   __ mov(r1, Operand(1), LeaveCC, eq);
-  __ mov(r1, Operand(0), LeaveCC, ne);
+  __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
   __ str(r1, MemOperand(frame_pointer(), kAtStart));
 
   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
@@ -684,7 +684,7 @@
   // Load previous char as initial value of current character register.
   Label at_start;
   __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(ne, &at_start);
   LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
   __ jmp(&start_label_);
@@ -751,7 +751,7 @@
     SafeCallTarget(&check_preempt_label_);
 
     CallCheckStackGuardState(r0);
-    __ cmp(r0, Operand(0));
+    __ cmp(r0, Operand(0, RelocInfo::NONE));
     // If returning non-zero, we should end execution with the given
     // result as return value.
     __ b(ne, &exit_label_);
@@ -778,7 +778,7 @@
     __ CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
-    __ cmp(r0, Operand(0));
+    __ cmp(r0, Operand(0, RelocInfo::NONE));
     __ b(eq, &exit_with_exception);
     // Otherwise use return value as new stack pointer.
     __ mov(backtrack_stackpointer(), r0);