Push version 1.2.0 to trunk.

Optimized floating-point operations on ARM.

Added a number of extensions to the debugger API.

Changed the enumeration order for unsigned integer keys to always be numerical order.

Added a "read" extension to the shell sample.

Added support for Array.prototype.reduce and Array.prototype.reduceRight.

Added an option to the SCons build to control Microsoft Visual C++ link-time code generation.

Fixed a number of bugs (in particular issue 315, issue 316, issue 317 and issue 318).


git-svn-id: http://v8.googlecode.com/svn/trunk@1781 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/codegen-arm.cc b/src/codegen-arm.cc
index 6fdabc3..9337454 100644
--- a/src/codegen-arm.cc
+++ b/src/codegen-arm.cc
@@ -38,7 +38,8 @@
 
 namespace v8 { namespace internal {
 
-#define __ masm_->
+#define __ ACCESS_MASM(masm_)
+
 
 // -------------------------------------------------------------------------
 // CodeGenState implementation.
@@ -146,13 +147,13 @@
       frame_->EmitPush(r0);
       frame_->CallRuntime(Runtime::kNewContext, 1);  // r0 holds the result
 
-      if (kDebug) {
-        JumpTarget verified_true(this);
-        __ cmp(r0, Operand(cp));
-        verified_true.Branch(eq);
-        __ stop("NewContext: r0 is expected to be the same as cp");
-        verified_true.Bind();
-      }
+#ifdef DEBUG
+      JumpTarget verified_true(this);
+      __ cmp(r0, Operand(cp));
+      verified_true.Branch(eq);
+      __ stop("NewContext: r0 is expected to be the same as cp");
+      verified_true.Bind();
+#endif
       // Update context local.
       __ str(cp, frame_->Context());
     }
@@ -653,37 +654,27 @@
 }
 
 
-class GetPropertyStub : public CodeStub {
- public:
-  GetPropertyStub() { }
-
- private:
-  Major MajorKey() { return GetProperty; }
-  int MinorKey() { return 0; }
-  void Generate(MacroAssembler* masm);
-};
-
-
-class SetPropertyStub : public CodeStub {
- public:
-  SetPropertyStub() { }
-
- private:
-  Major MajorKey() { return SetProperty; }
-  int MinorKey() { return 0; }
-  void Generate(MacroAssembler* masm);
-};
-
-
 class GenericBinaryOpStub : public CodeStub {
  public:
-  explicit GenericBinaryOpStub(Token::Value op) : op_(op) { }
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode)
+      : op_(op), mode_(mode) { }
 
  private:
   Token::Value op_;
+  OverwriteMode mode_;
+
+  // Minor key encoding in 16 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 14> {};
 
   Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() { return static_cast<int>(op_); }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_);
+  }
+
   void Generate(MacroAssembler* masm);
 
   const char* GetName() {
@@ -708,7 +699,8 @@
 };
 
 
-void CodeGenerator::GenericBinaryOperation(Token::Value op) {
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+                                           OverwriteMode overwrite_mode) {
   VirtualFrame::SpilledScope spilled_scope(this);
   // sp[0] : y
   // sp[1] : x
@@ -727,7 +719,7 @@
     case Token::SAR: {
       frame_->EmitPop(r0);  // r0 : y
       frame_->EmitPop(r1);  // r1 : x
-      GenericBinaryOpStub stub(op);
+      GenericBinaryOpStub stub(op, overwrite_mode);
       frame_->CallStub(&stub, 0);
       break;
     }
@@ -767,11 +759,13 @@
   DeferredInlineSmiOperation(CodeGenerator* generator,
                              Token::Value op,
                              int value,
-                             bool reversed)
+                             bool reversed,
+                             OverwriteMode overwrite_mode)
       : DeferredCode(generator),
         op_(op),
         value_(value),
-        reversed_(reversed) {
+        reversed_(reversed),
+        overwrite_mode_(overwrite_mode) {
     set_comment("[ DeferredInlinedSmiOperation");
   }
 
@@ -781,6 +775,7 @@
   Token::Value op_;
   int value_;
   bool reversed_;
+  OverwriteMode overwrite_mode_;
 };
 
 
@@ -844,7 +839,7 @@
       break;
   }
 
-  GenericBinaryOpStub igostub(op_);
+  GenericBinaryOpStub igostub(op_, overwrite_mode_);
   Result arg0 = generator()->allocator()->Allocate(r1);
   ASSERT(arg0.is_valid());
   Result arg1 = generator()->allocator()->Allocate(r0);
@@ -856,7 +851,8 @@
 
 void CodeGenerator::SmiOperation(Token::Value op,
                                  Handle<Object> value,
-                                 bool reversed) {
+                                 bool reversed,
+                                 OverwriteMode mode) {
   VirtualFrame::SpilledScope spilled_scope(this);
   // NOTE: This is an attempt to inline (a bit) more of the code for
   // some possible smi operations (like + and -) when (at least) one
@@ -875,7 +871,7 @@
   switch (op) {
     case Token::ADD: {
       DeferredCode* deferred =
-        new DeferredInlineSmiOperation(this, op, int_value, reversed);
+        new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
 
       __ add(r0, r0, Operand(value), SetCC);
       deferred->enter()->Branch(vs);
@@ -887,7 +883,7 @@
 
     case Token::SUB: {
       DeferredCode* deferred =
-        new DeferredInlineSmiOperation(this, op, int_value, reversed);
+        new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
 
       if (!reversed) {
         __ sub(r0, r0, Operand(value), SetCC);
@@ -905,7 +901,7 @@
     case Token::BIT_XOR:
     case Token::BIT_AND: {
       DeferredCode* deferred =
-        new DeferredInlineSmiOperation(this, op, int_value, reversed);
+        new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
       __ tst(r0, Operand(kSmiTagMask));
       deferred->enter()->Branch(ne);
       switch (op) {
@@ -925,12 +921,12 @@
         __ mov(ip, Operand(value));
         frame_->EmitPush(ip);
         frame_->EmitPush(r0);
-        GenericBinaryOperation(op);
+        GenericBinaryOperation(op, mode);
 
       } else {
         int shift_value = int_value & 0x1f;  // least significant 5 bits
         DeferredCode* deferred =
-          new DeferredInlineSmiOperation(this, op, shift_value, false);
+          new DeferredInlineSmiOperation(this, op, shift_value, false, mode);
         __ tst(r0, Operand(kSmiTagMask));
         deferred->enter()->Branch(ne);
         __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // remove tags
@@ -982,7 +978,7 @@
         frame_->EmitPush(ip);
         frame_->EmitPush(r0);
       }
-      GenericBinaryOperation(op);
+      GenericBinaryOperation(op, mode);
       break;
   }
 
@@ -1427,13 +1423,13 @@
   } else {
     frame_->CallRuntime(Runtime::kPushContext, 1);
   }
-  if (kDebug) {
-    JumpTarget verified_true(this);
-    __ cmp(r0, Operand(cp));
-    verified_true.Branch(eq);
-    __ stop("PushContext: r0 is expected to be the same as cp");
-    verified_true.Bind();
-  }
+#ifdef DEBUG
+  JumpTarget verified_true(this);
+  __ cmp(r0, Operand(cp));
+  verified_true.Branch(eq);
+  __ stop("PushContext: r0 is expected to be the same as cp");
+  verified_true.Bind();
+#endif
   // Update context local.
   __ str(cp, frame_->Context());
   ASSERT(frame_->height() == original_height);
@@ -1487,8 +1483,8 @@
   // Test for a Smi value in a HeapNumber.
   __ tst(r0, Operand(kSmiTagMask));
   is_smi.Branch(eq);
-  __ ldr(r1, MemOperand(r0, HeapObject::kMapOffset - kHeapObjectTag));
-  __ ldrb(r1, MemOperand(r1, Map::kInstanceTypeOffset - kHeapObjectTag));
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
   __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
   default_target->Branch(ne);
   frame_->EmitPush(r0);
@@ -2339,7 +2335,9 @@
   VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ DebuggerStatament");
   CodeForStatementPosition(node);
+#ifdef ENABLE_DEBUGGER_SUPPORT
   frame_->CallRuntime(Runtime::kDebugBreak, 0);
+#endif
   // Ignore the return value.
   ASSERT(frame_->height() == original_height);
 }
@@ -2523,7 +2521,9 @@
 
   if (s->is_eval_scope()) {
     Label next, fast;
-    if (!context.is(tmp)) __ mov(tmp, Operand(context));
+    if (!context.is(tmp)) {
+      __ mov(tmp, Operand(context));
+    }
     __ bind(&next);
     // Terminate at global context.
     __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
@@ -2934,15 +2934,24 @@
       LoadAndSpill(node->value());
 
     } else {
+      // +=, *= and similar binary assignments.
+      // Get the old value of the lhs.
       target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
       Literal* literal = node->value()->AsLiteral();
+      bool overwrite =
+          (node->value()->AsBinaryOperation() != NULL &&
+           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
       if (literal != NULL && literal->handle()->IsSmi()) {
-        SmiOperation(node->binary_op(), literal->handle(), false);
+        SmiOperation(node->binary_op(),
+                     literal->handle(),
+                     false,
+                     overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
         frame_->EmitPush(r0);
 
       } else {
         LoadAndSpill(node->value());
-        GenericBinaryOperation(node->binary_op());
+        GenericBinaryOperation(node->binary_op(),
+                               overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
         frame_->EmitPush(r0);
       }
     }
@@ -3822,19 +3831,39 @@
     // is a literal small integer.
     Literal* lliteral = node->left()->AsLiteral();
     Literal* rliteral = node->right()->AsLiteral();
+    // NOTE: The code below assumes that the slow cases (calls to runtime)
+    // never return a constant/immutable object.
+    bool overwrite_left =
+        (node->left()->AsBinaryOperation() != NULL &&
+         node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
+    bool overwrite_right =
+        (node->right()->AsBinaryOperation() != NULL &&
+         node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
 
     if (rliteral != NULL && rliteral->handle()->IsSmi()) {
       LoadAndSpill(node->left());
-      SmiOperation(node->op(), rliteral->handle(), false);
+      SmiOperation(node->op(),
+                   rliteral->handle(),
+                   false,
+                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
 
     } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
       LoadAndSpill(node->right());
-      SmiOperation(node->op(), lliteral->handle(), true);
+      SmiOperation(node->op(),
+                   lliteral->handle(),
+                   true,
+                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
 
     } else {
+      OverwriteMode overwrite_mode = NO_OVERWRITE;
+      if (overwrite_left) {
+        overwrite_mode = OVERWRITE_LEFT;
+      } else if (overwrite_right) {
+        overwrite_mode = OVERWRITE_RIGHT;
+      }
       LoadAndSpill(node->left());
       LoadAndSpill(node->right());
-      GenericBinaryOperation(node->op());
+      GenericBinaryOperation(node->op(), overwrite_mode);
     }
     frame_->EmitPush(r0);
   }
@@ -4067,7 +4096,8 @@
 
 
 #undef __
-#define __ masm->
+#define __ ACCESS_MASM(masm)
+
 
 Handle<String> Reference::GetName() {
   ASSERT(type_ == NAMED);
@@ -4305,167 +4335,80 @@
 }
 
 
-void GetPropertyStub::Generate(MacroAssembler* masm) {
-  // sp[0]: key
-  // sp[1]: receiver
-  Label slow, fast;
-  // Get the key and receiver object from the stack.
-  __ ldm(ia, sp, r0.bit() | r1.bit());
-  // Check that the key is a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(ne, &slow);
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-  // Check that the object isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &slow);
-
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing into string
-  // objects work as intended.
-  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  __ cmp(r2, Operand(JS_OBJECT_TYPE));
-  __ b(lt, &slow);
-
-  // Get the elements array of the object.
-  __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
-  // Check that the object is in fast mode (not dictionary).
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Factory::hash_table_map()));
-  __ b(eq, &slow);
-  // Check that the key (index) is within bounds.
-  __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
-  __ cmp(r0, Operand(r3));
-  __ b(lo, &fast);
-
-  // Slow case: Push extra copies of the arguments (2).
+static void HandleBinaryOpSlowCases(MacroAssembler* masm,
+                                    Label* not_smi,
+                                    const Builtins::JavaScript& builtin,
+                                    Token::Value operation,
+                                    int swi_number,
+                                    OverwriteMode mode) {
+  Label slow;
+  if (mode == NO_OVERWRITE) {
+    __ bind(not_smi);
+  }
   __ bind(&slow);
-  __ ldm(ia, sp, r0.bit() | r1.bit());
-  __ stm(db_w, sp, r0.bit() | r1.bit());
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2);
+  __ push(r1);
+  __ push(r0);
+  __ mov(r0, Operand(1));  // Set number of arguments.
+  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.
 
-  // Fast case: Do the load.
-  __ bind(&fast);
-  __ add(r3, r1, Operand(Array::kHeaderSize - kHeapObjectTag));
-  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
-  __ cmp(r0, Operand(Factory::the_hole_value()));
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ b(eq, &slow);
-
-  __ StubReturn(1);
-}
-
-
-void SetPropertyStub::Generate(MacroAssembler* masm) {
-  // r0 : value
-  // sp[0] : key
-  // sp[1] : receiver
-
-  Label slow, fast, array, extra, exit;
-  // Get the key and the object from the stack.
-  __ ldm(ia, sp, r1.bit() | r3.bit());  // r1 = key, r3 = receiver
-  // Check that the key is a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(ne, &slow);
-  // Check that the object isn't a smi.
-  __ tst(r3, Operand(kSmiTagMask));
-  __ b(eq, &slow);
-  // Get the type of the object from its map.
-  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  // Check if the object is a JS array or not.
-  __ cmp(r2, Operand(JS_ARRAY_TYPE));
-  __ b(eq, &array);
-  // Check that the object is some kind of JS object.
-  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
-  __ b(lt, &slow);
-
-
-  // Object case: Check key against length in the elements array.
-  __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
-  // Check that the object is in fast mode (not dictionary).
-  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ cmp(r2, Operand(Factory::hash_table_map()));
-  __ b(eq, &slow);
-  // Untag the key (for checking against untagged length in the fixed array).
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
-  // Compute address to store into and check array bounds.
-  __ add(r2, r3, Operand(Array::kHeaderSize - kHeapObjectTag));
-  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
-  __ ldr(ip, FieldMemOperand(r3, Array::kLengthOffset));
-  __ cmp(r1, Operand(ip));
-  __ b(lo, &fast);
-
-
-  // Slow case: Push extra copies of the arguments (3).
-  __ bind(&slow);
-  __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
-  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
-
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  // r0 == value, r1 == key, r2 == elements, r3 == object
-  __ bind(&extra);
-  __ b(ne, &slow);  // do not leave holes in the array
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // untag
-  __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
-  __ cmp(r1, Operand(ip));
-  __ b(hs, &slow);
-  __ mov(r1, Operand(r1, LSL, kSmiTagSize));  // restore tag
-  __ add(r1, r1, Operand(1 << kSmiTagSize));  // and increment
-  __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
-  __ mov(r3, Operand(r2));
-  // NOTE: Computing the address to store into must take the fact
-  // that the key has been incremented into account.
-  int displacement = Array::kHeaderSize - kHeapObjectTag -
-      ((1 << kSmiTagSize) * 2);
-  __ add(r2, r2, Operand(displacement));
-  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ b(&fast);
-
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode; if it is the
-  // length is always a smi.
-  // r0 == value, r3 == object
-  __ bind(&array);
-  __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
-  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ cmp(r1, Operand(Factory::hash_table_map()));
-  __ b(eq, &slow);
-
-  // Check the key against the length in the array, compute the
-  // address to store into and fall through to fast case.
-  __ ldr(r1, MemOperand(sp));
-  // r0 == value, r1 == key, r2 == elements, r3 == object.
-  __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
-  __ cmp(r1, Operand(ip));
-  __ b(hs, &extra);
-  __ mov(r3, Operand(r2));
-  __ add(r2, r2, Operand(Array::kHeaderSize - kHeapObjectTag));
-  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
-
-
-  // Fast case: Do the store.
-  // r0 == value, r2 == address to store into, r3 == elements
-  __ bind(&fast);
-  __ str(r0, MemOperand(r2));
-  // Skip write barrier if the written value is a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &exit);
-  // Update write barrier for the elements array address.
-  __ sub(r1, r2, Operand(r3));
-  __ RecordWrite(r3, r1, r2);
-  __ bind(&exit);
-  __ StubReturn(1);
+  // Could it be a double-double op?  If we already have a place to put
+  // the answer then we can do the op and skip the builtin and runtime call.
+  if (mode != NO_OVERWRITE) {
+    __ bind(not_smi);
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &slow);  // We can't handle a Smi-double combination yet.
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &slow);  // We can't handle a Smi-double combination yet.
+    // Get map of r0 into r2.
+    __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+    // Get type of r0 into r3.
+    __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+    __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
+    __ b(ne, &slow);
+    // Get type of r1 into r3.
+    __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+    // Check they are both the same map (heap number map).
+    __ cmp(r2, r3);
+    __ b(ne, &slow);
+    // Both are doubles.
+    // Calling convention says that second double is in r2 and r3.
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+    __ push(lr);
+    if (mode == OVERWRITE_LEFT) {
+      __ push(r1);
+    } else {
+      __ push(r0);
+    }
+    // Calling convention says that first double is in r0 and r1.
+    __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+    // Call C routine that may not cause GC or other trouble.
+    __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
+#if !defined(__arm__)
+    // Notify the simulator that we are calling a C floating-point routine.
+    __ swi(swi_number);
+#else
+    // Actually call the floating-point routine written in C.
+    __ Call(r5);
+#endif
+    // Store answer in the overwritable heap number.
+    __ pop(r4);
+#if !defined(__ARM_EABI__) && defined(__arm__)
+    // Double returned in fp coprocessor register 0 and 1, encoded as register
+    // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
+    // subtract the tag from r4.
+    __ sub(r5, r4, Operand(kHeapObjectTag));
+    __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
+#else
+    // Double returned in fp coprocessor register 0 and 1.
+    __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
+    __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
+#endif
+    __ mov(r0, Operand(r4));
+    // And we are done.
+    __ pop(pc);
+  }
 }
 
 
@@ -4474,89 +4417,84 @@
   // r0 : y
   // result : r0
 
+  // All ops need to know whether we are dealing with two Smis.  Set up r2 to
+  // tell us that.
+  __ orr(r2, r1, Operand(r0));  // r2 = x | y;
+
   switch (op_) {
     case Token::ADD: {
-      Label slow, exit;
-      // fast path
-      __ orr(r2, r1, Operand(r0));  // r2 = x | y;
-      __ add(r0, r1, Operand(r0), SetCC);  // add y optimistically
-      // go slow-path in case of overflow
-      __ b(vs, &slow);
-      // go slow-path in case of non-smi operands
-      ASSERT(kSmiTag == 0);  // adjust code below
+      Label not_smi;
+      // Fast path.
+      ASSERT(kSmiTag == 0);  // Adjust code below.
       __ tst(r2, Operand(kSmiTagMask));
-      __ b(eq, &exit);
-      // slow path
-      __ bind(&slow);
-      __ sub(r0, r0, Operand(r1));  // revert optimistic add
-      __ push(r1);
-      __ push(r0);
-      __ mov(r0, Operand(1));  // set number of arguments
-      __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
-      // done
-      __ bind(&exit);
+      __ b(ne, &not_smi);
+      __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
+      // Return if no overflow.
+      __ Ret(vc);
+      __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
+
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::ADD,
+                              Token::ADD,
+                              assembler::arm::simulator_fp_add,
+                              mode_);
       break;
     }
 
     case Token::SUB: {
-      Label slow, exit;
-      // fast path
-      __ orr(r2, r1, Operand(r0));  // r2 = x | y;
-      __ sub(r3, r1, Operand(r0), SetCC);  // subtract y optimistically
-      // go slow-path in case of overflow
-      __ b(vs, &slow);
-      // go slow-path in case of non-smi operands
-      ASSERT(kSmiTag == 0);  // adjust code below
+      Label not_smi;
+      // Fast path.
+      ASSERT(kSmiTag == 0);  // Adjust code below.
       __ tst(r2, Operand(kSmiTagMask));
-      __ mov(r0, Operand(r3), LeaveCC, eq);  // conditionally set r0 to result
-      __ b(eq, &exit);
-      // slow path
-      __ bind(&slow);
-      __ push(r1);
-      __ push(r0);
-      __ mov(r0, Operand(1));  // set number of arguments
-      __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
-      // done
-      __ bind(&exit);
+      __ b(ne, &not_smi);
+      __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
+      // Return if no overflow.
+      __ Ret(vc);
+      __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
+
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::SUB,
+                              Token::SUB,
+                              assembler::arm::simulator_fp_sub,
+                              mode_);
       break;
     }
 
     case Token::MUL: {
-      Label slow, exit;
-      // tag check
-      __ orr(r2, r1, Operand(r0));  // r2 = x | y;
+      Label not_smi, slow;
       ASSERT(kSmiTag == 0);  // adjust code below
       __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &slow);
-      // remove tag from one operand (but keep sign), so that result is smi
+      __ b(ne, &not_smi);
+      // Remove tag from one operand (but keep sign), so that result is Smi.
       __ mov(ip, Operand(r0, ASR, kSmiTagSize));
-      // do multiplication
-      __ smull(r3, r2, r1, ip);  // r3 = lower 32 bits of ip*r1
-      // go slow on overflows (overflow bit is not set)
+      // Do multiplication
+      __ smull(r3, r2, r1, ip);  // r3 = lower 32 bits of ip*r1.
+      // Go slow on overflows (overflow bit is not set).
       __ mov(ip, Operand(r3, ASR, 31));
       __ cmp(ip, Operand(r2));  // no overflow if higher 33 bits are identical
       __ b(ne, &slow);
-      // go slow on zero result to handle -0
+      // Go slow on zero result to handle -0.
       __ tst(r3, Operand(r3));
       __ mov(r0, Operand(r3), LeaveCC, ne);
-      __ b(ne, &exit);
-      // slow case
+      __ Ret(ne);
+      // Slow case.
       __ bind(&slow);
-      __ push(r1);
-      __ push(r0);
-      __ mov(r0, Operand(1));  // set number of arguments
-      __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
-      // done
-      __ bind(&exit);
+
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::MUL,
+                              Token::MUL,
+                              assembler::arm::simulator_fp_mul,
+                              mode_);
       break;
     }
 
     case Token::BIT_OR:
     case Token::BIT_AND:
     case Token::BIT_XOR: {
-      Label slow, exit;
-      // tag check
-      __ orr(r2, r1, Operand(r0));  // r2 = x | y;
+      Label slow;
       ASSERT(kSmiTag == 0);  // adjust code below
       __ tst(r2, Operand(kSmiTagMask));
       __ b(ne, &slow);
@@ -4566,7 +4504,7 @@
         case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
         default: UNREACHABLE();
       }
-      __ b(&exit);
+      __ Ret();
       __ bind(&slow);
       __ push(r1);  // restore stack
       __ push(r0);
@@ -4584,16 +4522,13 @@
         default:
           UNREACHABLE();
       }
-      __ bind(&exit);
       break;
     }
 
     case Token::SHL:
     case Token::SHR:
     case Token::SAR: {
-      Label slow, exit;
-      // tag check
-      __ orr(r2, r1, Operand(r0));  // r2 = x | y;
+      Label slow;
       ASSERT(kSmiTag == 0);  // adjust code below
       __ tst(r2, Operand(kSmiTagMask));
       __ b(ne, &slow);
@@ -4633,7 +4568,7 @@
       // tag result and store it in r0
       ASSERT(kSmiTag == 0);  // adjust code below
       __ mov(r0, Operand(r3, LSL, kSmiTagSize));
-      __ b(&exit);
+      __ Ret();
       // slow case
       __ bind(&slow);
       __ push(r1);  // restore stack
@@ -4645,13 +4580,13 @@
         case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break;
         default: UNREACHABLE();
       }
-      __ bind(&exit);
       break;
     }
 
     default: UNREACHABLE();
   }
-  __ Ret();
+  // This code should be unreachable.
+  __ stop("Unreachable");
 }
 
 
@@ -4721,7 +4656,11 @@
   __ mov(cp, Operand(0), LeaveCC, eq);
   // Restore cp otherwise.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-  if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc));
+#ifdef DEBUG
+  if (FLAG_debug_code) {
+    __ mov(lr, Operand(pc));
+  }
+#endif
   __ pop(pc);
 }
 
@@ -4784,7 +4723,11 @@
   __ mov(cp, Operand(0), LeaveCC, eq);
   // Restore cp otherwise.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-  if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc));
+#ifdef DEBUG
+  if (FLAG_debug_code) {
+    __ mov(lr, Operand(pc));
+  }
+#endif
   __ pop(pc);
 }
 
@@ -5043,9 +4986,11 @@
   }
   __ ldr(ip, MemOperand(ip));  // deref address
 
-  // Branch and link to JSEntryTrampoline
+  // Branch and link to JSEntryTrampoline.  We don't use the double underscore
+  // macro for the add instruction because we don't want the coverage tool
+  // inserting instructions here after we read the pc.
   __ mov(lr, Operand(pc));
-  __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
 
   // Unlink this frame from the handler chain. When reading the
   // address of the next handler, there is no need to use the address
@@ -5057,6 +5002,7 @@
   // No need to restore registers
   __ add(sp, sp, Operand(StackHandlerConstants::kSize));
 
+
   __ bind(&exit);  // r0 holds result
   // Restore the top frame descriptors from the stack.
   __ pop(r3);
@@ -5068,7 +5014,9 @@
 
   // Restore callee-saved registers and return.
 #ifdef DEBUG
-  if (FLAG_debug_code) __ mov(lr, Operand(pc));
+  if (FLAG_debug_code) {
+    __ mov(lr, Operand(pc));
+  }
 #endif
   __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
 }