Update V8 to r5136 as required by WebKit r64264
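
Pulls in the upstream ARM code generator changes: compile-time invariants
move from runtime ASSERT/ASSERT_EQ checks to STATIC_ASSERT, and named
property stores are now inlined (patchable map check, in-object store and
write barrier) with a deferred slow path that falls back to the store IC.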

Change-Id: I55b86fa101d9d53e889e2e3811fdf75f463ac3c6
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 0b2081b..1271e80 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -151,6 +151,8 @@
 // -------------------------------------------------------------------------
 // CodeGenerator implementation
 
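+// Number of instructions generated for an inlined write barrier. Every
+// inlined write barrier must be the same size; -1 until the first one is
+// generated.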
+int CodeGenerator::inlined_write_barrier_size_ = -1;
+
 CodeGenerator::CodeGenerator(MacroAssembler* masm)
     : deferred_(8),
       masm_(masm),
@@ -815,7 +817,7 @@
         // Check they are both small and positive.
         __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
         ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
-        ASSERT_EQ(0, kSmiTag);
+        STATIC_ASSERT(kSmiTag == 0);
         if (op == Token::ADD) {
           __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
         } else {
@@ -863,7 +865,7 @@
           __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
         } else {
           ASSERT(op == Token::BIT_XOR);
-          ASSERT_EQ(0, kSmiTag);
+          STATIC_ASSERT(kSmiTag == 0);
           __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
         }
         if (cond != al) {
@@ -1520,8 +1522,8 @@
   // Since JS_FUNCTION_TYPE is the last instance type and it is right
   // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
   // bound.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
   __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
   __ b(lt, &build_args);
 
@@ -2610,7 +2612,7 @@
     // The next handler address is on top of the frame.  Unlink from
     // the handler list and drop the rest of this handler from the
     // frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(r1);
     __ mov(r3, Operand(handler_address));
     __ str(r1, MemOperand(r3));
@@ -2636,7 +2638,7 @@
       __ ldr(sp, MemOperand(r3));
       frame_->Forget(frame_->height() - handler_height);
 
-      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
       frame_->EmitPop(r1);
       __ str(r1, MemOperand(r3));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -2723,7 +2725,7 @@
   // chain and set the state on the frame to FALLING.
   if (has_valid_frame()) {
     // The next handler address is on top of the frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(r1);
     __ mov(r3, Operand(handler_address));
     __ str(r1, MemOperand(r3));
@@ -2762,7 +2764,7 @@
 
       // Unlink this handler and drop it from the frame.  The next
       // handler address is currently on top of the frame.
-      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
       frame_->EmitPop(r1);
       __ str(r1, MemOperand(r3));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4181,8 +4183,8 @@
   // As long as JS_FUNCTION_TYPE is the last instance type and it is
   // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
   // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
   __ cmp(r1, Operand(JS_FUNCTION_TYPE));
   function.Branch(eq);
 
@@ -5128,7 +5130,7 @@
 
   const int kFingerOffset =
       FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
   // r0 now holds finger offset as a smi.
   __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -6207,6 +6209,60 @@
 }
 
 
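+// Deferred (slow-path) code for an inlined named store: taken when the
+// receiver is a smi or its map check fails, it performs the store by
+// calling the store IC.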
+class DeferredReferenceSetNamedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetNamedValue(Register value,
+                                 Register receiver,
+                                 Handle<String> name)
+      : value_(value), receiver_(receiver), name_(name) {
+    set_comment("[ DeferredReferenceSetNamedValue");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register value_;
+  Register receiver_;
+  Handle<String> name_;
+};
+
+
+// Takes value in r0, receiver in r1 and returns the result (the
+// value) in r0.
+void DeferredReferenceSetNamedValue::Generate() {
+  // Record the entry frame and spill.
+  VirtualFrame copied_frame(*frame_state()->frame());
+  copied_frame.SpillAll();
+
+  // Ensure value in r0, receiver in r1 to match store ic calling
+  // convention.
+  ASSERT(value_.is(r0) && receiver_.is(r1));
+  __ mov(r2, Operand(name_));
+
+  // The rest of the instructions in the deferred code must be together.
+  { Assembler::BlockConstPoolScope block_const_pool(masm_);
+    // Call the store IC. It takes the value, receiver and property name in
+    // r0, r1 and r2, respectively.
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // The call must be followed by a nop instruction to indicate that the
+    // named store has been inlined.
+    __ nop(PROPERTY_ACCESS_INLINED);
+
+    // Go back to the frame we entered with. The instructions
+    // generated by this merge are skipped over by the inline store
+    // patching mechanism when looking for the branch instruction that
+    // tells it where the code to patch is.
+    copied_frame.MergeTo(frame_state()->frame());
+
+    // Block the constant pool for one more instruction after leaving this
+    // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
+}
+
+
 // Consumes the top of stack (the receiver) and pushes the result instead.
 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
   if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
@@ -6277,11 +6333,88 @@
 
 void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
 #ifdef DEBUG
-  int expected_height = frame_->height() - (is_contextual ? 1 : 2);
+  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
 #endif
-  frame_->CallStoreIC(name, is_contextual);
 
-  ASSERT_EQ(expected_height, frame_->height());
+  Result result;
+  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+    frame()->CallStoreIC(name, is_contextual);
+  } else {
+    // Inline the in-object property case.
+    JumpTarget slow, done;
+
+    // Get the value and receiver from the stack.
+    frame()->PopToR0();
+    Register value = r0;
+    frame()->PopToR1();
+    Register receiver = r1;
+
+    DeferredReferenceSetNamedValue* deferred =
+        new DeferredReferenceSetNamedValue(value, receiver, name);
+
+    // Check that the receiver is a heap object.
+    __ tst(receiver, Operand(kSmiTagMask));
+    deferred->Branch(eq);
+
+    // The following instructions are the part of the inlined
+    // in-object property store code which can be patched. Therefore
+    // the exact number of instructions generated must be fixed, so
+    // the constant pool is blocked while generating this code.
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      Register scratch0 = VirtualFrame::scratch0();
+      Register scratch1 = VirtualFrame::scratch1();
+
+      // Check the map. Initially use an invalid map to force a
+      // failure. The map check will be patched in the runtime system.
+      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+#ifdef DEBUG
+      Label check_inlined_codesize;
+      masm_->bind(&check_inlined_codesize);
+#endif
+      __ mov(scratch0, Operand(Factory::null_value()));
+      __ cmp(scratch0, scratch1);
+      deferred->Branch(ne);
+
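+      // Store to a placeholder offset of zero. Like the map check above,
+      // the real in-object offset is patched in by the runtime system.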
+      int offset = 0;
+      __ str(value, MemOperand(receiver, offset));
+
+      // Update the write barrier and record its size. We do not use
+      // the RecordWrite macro here because we want the offset
+      // addition instruction first to make it easy to patch.
+      Label record_write_start, record_write_done;
+      __ bind(&record_write_start);
+      // Add offset into the object.
+      __ add(scratch0, receiver, Operand(offset));
+      // Test that the object is not in the new space.  We cannot set
+      // region marks for new space pages.
+      __ InNewSpace(receiver, scratch1, eq, &record_write_done);
+      // Record the actual write.
+      __ RecordWriteHelper(receiver, scratch0, scratch1);
+      __ bind(&record_write_done);
+      // Clobber all input registers when running with the debug-code flag
+      // turned on to provoke errors.
+      if (FLAG_debug_code) {
+        __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
+        __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+        __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+      }
+      // Check that this is the first inlined write barrier or that
+      // this inlined write barrier has the same size as all the other
+      // inlined write barriers.
+      ASSERT((inlined_write_barrier_size_ == -1) ||
+             (inlined_write_barrier_size_ ==
+              masm()->InstructionsGeneratedSince(&record_write_start)));
+      inlined_write_barrier_size_ =
+          masm()->InstructionsGeneratedSince(&record_write_start);
+
+      // Make sure that the expected number of instructions are generated.
+      ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
+                masm()->InstructionsGeneratedSince(&check_inlined_codesize));
+    }
+    deferred->BindExit();
+  }
+  ASSERT_EQ(expected_height, frame()->height());
 }
 
 
@@ -6848,7 +6981,7 @@
   // Move sign bit from source to destination.  This works because the sign bit
   // in the exponent word of the double has the same position and polarity as
   // the 2's complement sign bit in a Smi.
-  ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
   // Subtract from 0 if source was negative.
   __ rsb(source_, source_, Operand(0), LeaveCC, ne);
@@ -6901,7 +7034,7 @@
   // the_int_ has the answer which is a signed int32 but not a Smi.
   // We test for the special value that has a different exponent.  This test
   // has the neat side effect of setting the flags according to the sign.
-  ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   __ cmp(the_int_, Operand(0x80000000u));
   __ b(eq, &max_negative_int);
   // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
@@ -7246,7 +7379,7 @@
     // If either operand is a JSObject or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
     Label first_non_object;
     // Get the type of the first operand into r2 and compare it with
     // FIRST_JS_OBJECT_TYPE.
@@ -7272,8 +7405,8 @@
 
     // Now that we have the types we might as well check for symbol-symbol.
     // Ensure that no non-strings have the symbol bit set.
-    ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
-    ASSERT(kSymbolTag != 0);
+    STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+    STATIC_ASSERT(kSymbolTag != 0);
     __ and_(r2, r2, Operand(r3));
     __ tst(r2, Operand(kIsSymbolMask));
     __ b(ne, &return_not_equal);
@@ -7324,7 +7457,7 @@
   // r2 is object type of rhs.
   // Ensure that no non-strings have the symbol bit set.
   Label object_test;
-  ASSERT(kSymbolTag != 0);
+  STATIC_ASSERT(kSymbolTag != 0);
   __ tst(r2, Operand(kIsNotStringMask));
   __ b(ne, &object_test);
   __ tst(r2, Operand(kIsSymbolMask));
@@ -7395,7 +7528,7 @@
                   not_found,
                   true);
 
-      ASSERT_EQ(8, kDoubleSize);
+      STATIC_ASSERT(kDoubleSize == 8);
       __ add(scratch1,
              object,
              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
@@ -7494,7 +7627,7 @@
 
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(0, Smi::FromInt(0));
   __ and_(r2, lhs_, Operand(rhs_));
   __ tst(r2, Operand(kSmiTagMask));
@@ -8497,7 +8630,7 @@
       Label not_smi;
       // Fast path.
       if (ShouldGenerateSmiCode()) {
-        ASSERT(kSmiTag == 0);  // Adjust code below.
+        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
         __ tst(smi_test_reg, Operand(kSmiTagMask));
         __ b(ne, &not_smi);
         __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
@@ -8513,7 +8646,7 @@
       Label not_smi;
       // Fast path.
       if (ShouldGenerateSmiCode()) {
-        ASSERT(kSmiTag == 0);  // Adjust code below.
+        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
         __ tst(smi_test_reg, Operand(kSmiTagMask));
         __ b(ne, &not_smi);
         if (lhs.is(r1)) {
@@ -8535,7 +8668,7 @@
     case Token::MUL: {
       Label not_smi, slow;
       if (ShouldGenerateSmiCode()) {
-        ASSERT(kSmiTag == 0);  // adjust code below
+        STATIC_ASSERT(kSmiTag == 0);  // adjust code below
         __ tst(smi_test_reg, Operand(kSmiTagMask));
         Register scratch2 = smi_test_reg;
         smi_test_reg = no_reg;
@@ -8671,7 +8804,7 @@
         Label slow;
         Label not_power_of_2;
         ASSERT(!ShouldGenerateSmiCode());
-        ASSERT(kSmiTag == 0);  // Adjust code below.
+        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
         // Check for two positive smis.
         __ orr(smi_test_reg, lhs, Operand(rhs));
         __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
@@ -8731,7 +8864,7 @@
     case Token::SHR:
     case Token::SHL: {
       Label slow;
-      ASSERT(kSmiTag == 0);  // adjust code below
+      STATIC_ASSERT(kSmiTag == 0);  // adjust code below
       __ tst(smi_test_reg, Operand(kSmiTagMask));
       __ b(ne, &slow);
       Register scratch2 = smi_test_reg;
@@ -9045,17 +9178,17 @@
   // r0 holds the exception.
 
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
 
   // Drop the sp to the top of the handler.
   __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
   __ ldr(sp, MemOperand(r3));
 
   // Restore the next handler and frame pointer, discard handler state.
-  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   __ pop(r2);
   __ str(r2, MemOperand(r3));
-  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
   __ ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.
 
   // Before returning we restore the context from the frame pointer if
@@ -9071,7 +9204,7 @@
     __ mov(lr, Operand(pc));
   }
 #endif
-  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ pop(pc);
 }
 
@@ -9079,7 +9212,7 @@
 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
                                           UncatchableExceptionType type) {
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
 
   // Drop sp to the top stack handler.
   __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
@@ -9100,7 +9233,7 @@
   __ bind(&done);
 
   // Set the top handler address to next handler past the current ENTRY handler.
-  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   __ pop(r2);
   __ str(r2, MemOperand(r3));
 
@@ -9124,7 +9257,7 @@
   //         lr
 
   // Discard handler state (r2 is not used) and restore frame pointer.
-  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
   __ ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
   // Before returning we restore the context from the frame pointer if
   // not NULL.  The frame pointer is NULL in the exception handler of a
@@ -9139,7 +9272,7 @@
     __ mov(lr, Operand(pc));
   }
 #endif
-  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ pop(pc);
 }
 
@@ -9234,7 +9367,7 @@
 
   // check for failure result
   Label failure_returned;
-  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
   // Lower 2 bits of r2 are 0 iff r0 has failure tag.
   __ add(r2, r0, Operand(1));
   __ tst(r2, Operand(kFailureTagMask));
@@ -9249,7 +9382,7 @@
   // check if we should retry or throw exception
   Label retry;
   __ bind(&failure_returned);
-  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
   __ b(eq, &retry);
 
@@ -9652,12 +9785,12 @@
   }
 
   // Setup the callee in-object property.
-  ASSERT(Heap::arguments_callee_index == 0);
+  STATIC_ASSERT(Heap::arguments_callee_index == 0);
   __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
   __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
 
   // Get the length (smi tagged) and set that as an in-object property too.
-  ASSERT(Heap::arguments_length_index == 1);
+  STATIC_ASSERT(Heap::arguments_length_index == 1);
   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
   __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
 
@@ -9749,7 +9882,7 @@
 
   // Check that the first argument is a JSRegExp object.
   __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ tst(r0, Operand(kSmiTagMask));
   __ b(eq, &runtime);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
@@ -9776,8 +9909,8 @@
          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2. This
 // uses the assumption that smis are 2 * their untagged value.
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ add(r2, r2, Operand(2));  // r2 was a smi.
   // Check that the static offsets vector buffer is large enough.
   __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
@@ -9838,7 +9971,7 @@
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   // First check for flat string.
   __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
-  ASSERT_EQ(0, kStringTag | kSeqStringTag);
+  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ b(eq, &seq_string);
 
   // subject: Subject string
@@ -9848,8 +9981,8 @@
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  ASSERT(kExternalStringTag !=0);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
   __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
   __ b(ne, &runtime);
   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
@@ -9860,7 +9993,7 @@
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   // Is first part a flat string?
-  ASSERT_EQ(0, kSeqStringTag);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r0, Operand(kStringRepresentationMask));
   __ b(nz, &runtime);
 
@@ -9868,8 +10001,8 @@
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // r0: Instance type of subject string
-  ASSERT_EQ(4, kAsciiStringTag);
-  ASSERT_EQ(0, kTwoByteStringTag);
+  STATIC_ASSERT(kAsciiStringTag == 4);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
   // Find the code object based on the assumptions above.
   __ and_(r0, r0, Operand(kStringEncodingMask));
   __ mov(r3, Operand(r0, ASR, 2), SetCC);
@@ -9923,7 +10056,7 @@
   // calculate the shift of the index (0 for ASCII and 1 for two byte).
   __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
   __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-  ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
   __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
   // Argument 4 (r3): End of string data
@@ -9978,8 +10111,8 @@
   __ ldr(r1,
          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ add(r1, r1, Operand(2));  // r1 was a smi.
 
   // r1: number of capture registers
@@ -10191,7 +10324,7 @@
   __ b(ls, index_out_of_range_);
 
   // We need special handling for non-flat strings.
-  ASSERT(kSeqStringTag == 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result_, Operand(kStringRepresentationMask));
   __ b(eq, &flat_string);
 
@@ -10213,13 +10346,13 @@
   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   // If the first cons component is also non-flat, then go to runtime.
-  ASSERT(kSeqStringTag == 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result_, Operand(kStringRepresentationMask));
   __ b(nz, &call_runtime_);
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
-  ASSERT(kAsciiStringTag != 0);
+  STATIC_ASSERT(kAsciiStringTag != 0);
   __ tst(result_, Operand(kStringEncodingMask));
   __ b(nz, &ascii_string);
 
@@ -10227,7 +10360,7 @@
   // Load the 2-byte character code into the result register. We can
   // add without shifting since the smi tag size is the log2 of the
   // number of bytes in a two-byte character.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
   __ add(scratch_, object_, Operand(scratch_));
   __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
   __ jmp(&got_char_code);
@@ -10304,8 +10437,8 @@
 
 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  ASSERT(kSmiTag == 0);
-  ASSERT(kSmiShiftSize == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiShiftSize == 0);
   ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
   __ tst(code_,
          Operand(kSmiTagMask |
@@ -10314,7 +10447,7 @@
 
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged ascii char code.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -10419,7 +10552,7 @@
   // Ensure that reading an entire aligned word containing the last character
   // of a string will not read outside the allocated area (because we pad up
   // to kObjectAlignment).
-  ASSERT(kObjectAlignment >= kReadAlignment);
+  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
   // Assumes word reads and writes are little endian.
   // Nothing to do for zero characters.
   Label done;
@@ -10623,7 +10756,7 @@
     __ and_(candidate, candidate, Operand(mask));
 
     // Load the entry from the symbol table.
-    ASSERT_EQ(1, SymbolTable::kEntrySize);
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
     __ ldr(candidate,
            MemOperand(first_symbol_table_element,
                       candidate,
@@ -10723,8 +10856,8 @@
   // Check bounds and smi-ness.
   __ ldr(r7, MemOperand(sp, kToOffset));
   __ ldr(r6, MemOperand(sp, kFromOffset));
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   // I.e., arithmetic shift right by one un-smi-tags.
   __ mov(r2, Operand(r7, ASR, 1), SetCC);
   __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
@@ -10747,7 +10880,7 @@
 
   // Make sure first argument is a sequential (or flat) string.
   __ ldr(r5, MemOperand(sp, kStringOffset));
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ tst(r5, Operand(kSmiTagMask));
   __ b(eq, &runtime);
   Condition is_string = masm->IsObjectStringType(r5, r1);
@@ -10761,8 +10894,8 @@
   // r7: to (smi)
   Label seq_string;
   __ and_(r4, r1, Operand(kStringRepresentationMask));
-  ASSERT(kSeqStringTag < kConsStringTag);
-  ASSERT(kExternalStringTag > kConsStringTag);
+  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   __ cmp(r4, Operand(kConsStringTag));
   __ b(gt, &runtime);  // External strings go to runtime.
   __ b(lt, &seq_string);  // Sequential strings are handled directly.
@@ -10774,7 +10907,7 @@
   __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   __ tst(r1, Operand(kStringRepresentationMask));
-  ASSERT_EQ(0, kSeqStringTag);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ b(ne, &runtime);  // Cons and External strings go to runtime.
 
   // Definitely a sequential string.
@@ -10798,7 +10931,7 @@
   // Check for flat ascii string.
   Label non_ascii_flat;
   __ tst(r1, Operand(kStringEncodingMask));
-  ASSERT_EQ(0, kTwoByteStringTag);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
   __ b(eq, &non_ascii_flat);
 
   Label result_longer_than_two;
@@ -10847,7 +10980,7 @@
   // r1: first character of result string.
   // r2: result string length.
   // r5: first character of sub string to copy.
-  ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
+  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                            COPY_ASCII | DEST_ALWAYS_ALIGNED);
   __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
@@ -10878,7 +11011,7 @@
   // r1: first character of result.
   // r2: result length.
   // r5: first character of string to copy.
-  ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                            DEST_ALWAYS_ALIGNED);
   __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
@@ -10906,7 +11039,7 @@
   Register length_delta = scratch3;
   __ mov(scratch1, scratch2, LeaveCC, gt);
   Register min_length = scratch1;
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ tst(min_length, Operand(min_length));
   __ b(eq, &compare_lengths);
 
@@ -10962,8 +11095,8 @@
   Label not_same;
   __ cmp(r0, r1);
   __ b(ne, &not_same);
-  ASSERT_EQ(0, EQUAL);
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
   __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
   __ add(sp, sp, Operand(2 * kPointerSize));
@@ -10998,14 +11131,14 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (string_check_) {
-    ASSERT_EQ(0, kSmiTag);
+    STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
     // Load instance types.
     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
     __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
-    ASSERT_EQ(0, kStringTag);
+    STATIC_ASSERT(kStringTag == 0);
     // If either is not a string, go to runtime.
     __ tst(r4, Operand(kIsNotStringMask));
     __ tst(r5, Operand(kIsNotStringMask), eq);
@@ -11022,10 +11155,10 @@
     // Check if either of the strings are empty. In that case return the other.
     __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
     __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
     __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
     __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
      // Else test if second string is empty.
     __ cmp(r3, Operand(Smi::FromInt(0)), ne);
     __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
@@ -11049,7 +11182,7 @@
   // Look at the length of the result of adding the two strings.
   Label string_add_flat_result, longer_than_two;
   // Adding two lengths can't overflow.
-  ASSERT(String::kMaxLength * 2 > String::kMaxLength);
+  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
   __ add(r6, r2, Operand(r3));
   // Use the runtime system when adding two one character strings, as it
   // contains optimizations for this specific case using the symbol table.
@@ -11097,7 +11230,7 @@
   __ cmp(r6, Operand(String::kMinNonFlatLength));
   __ b(lt, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
-  ASSERT((String::kMaxLength & 0x80000000) == 0);
+  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   ASSERT(IsPowerOf2(String::kMaxLength + 1));
   // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
   __ cmp(r6, Operand(String::kMaxLength + 1));
@@ -11112,7 +11245,7 @@
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   }
   Label non_ascii, allocated, ascii_data;
-  ASSERT_EQ(0, kTwoByteStringTag);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
   __ tst(r4, Operand(kStringEncodingMask));
   __ tst(r5, Operand(kStringEncodingMask), ne);
   __ b(eq, &non_ascii);
@@ -11138,7 +11271,7 @@
   __ tst(r5, Operand(kAsciiDataHintMask), ne);
   __ b(ne, &ascii_data);
   __ eor(r4, r4, Operand(r5));
-  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
   __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
   __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
   __ b(eq, &ascii_data);
@@ -11164,7 +11297,7 @@
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   }
   // Check that both strings are sequential.
-  ASSERT_EQ(0, kSeqStringTag);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r4, Operand(kStringRepresentationMask));
   __ tst(r5, Operand(kStringRepresentationMask), eq);
   __ b(ne, &string_add_runtime);