Version 1.3.2.

Started a new compiler infrastructure for two-pass compilation using a control flow graph constructed from the AST.

Added profiler stack sampling for X64.

Safe handling of NaN values passed to POSIX platform-dependent time functions.

Added a new profiler control API to unify control of the various aspects of profiling.

Fixed issue 392.



git-svn-id: http://v8.googlecode.com/svn/trunk@2624 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index fae1525..de0ef8e 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -146,43 +146,30 @@
   // for the remembered set bits.
   Label done;
 
-  // This optimization cannot survive serialization and deserialization,
-  // so we disable as long as serialization can take place.
-  int32_t new_space_start =
-      reinterpret_cast<int32_t>(ExternalReference::new_space_start().address());
-  if (Serializer::enabled() || new_space_start < 0) {
-    // Cannot do smart bit-twiddling. Need to do two consecutive checks.
-    // Check for Smi first.
-    test(value, Immediate(kSmiTagMask));
-    j(zero, &done);
-    // Test that the object address is not in the new space.  We cannot
-    // set remembered set bits in the new space.
+  // Skip barrier if writing a smi.
+  ASSERT_EQ(0, kSmiTag);
+  test(value, Immediate(kSmiTagMask));
+  j(zero, &done);
+
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
     mov(value, Operand(object));
     and_(value, Heap::NewSpaceMask());
     cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
     j(equal, &done);
   } else {
-    // move the value SmiTag into the sign bit
-    shl(value, 31);
-    // combine the object with value SmiTag
-    or_(value, Operand(object));
-    // remove the uninteresing bits inside the page
-    and_(value, Heap::NewSpaceMask() | (1 << 31));
-    // xor has two effects:
-    // - if the value was a smi, then the result will be negative
-    // - if the object is pointing into new space area the page bits will
-    //   all be zero
-    xor_(value, new_space_start | (1 << 31));
-    // Check for both conditions in one branch
-    j(less_equal, &done);
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start().address());
+    lea(value, Operand(object, -new_space_start));
+    and_(value, Heap::NewSpaceMask());
+    j(equal, &done);
   }
 
   if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
     // Compute the bit offset in the remembered set, leave it in 'value'.
-    mov(value, Operand(object));
+    lea(value, Operand(object, offset));
     and_(value, Page::kPageAlignmentMask);
-    add(Operand(value), Immediate(offset));
-    shr(value, kObjectAlignmentBits);
+    shr(value, kPointerSizeLog2);
 
     // Compute the page address from the heap object pointer, leave it in
     // 'object'.
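
For reference, here is a minimal C++ sketch of the check the new fast path above implements (the constants and the helper name are illustrative stand-ins, not V8's actual declarations):

    #include <stdint.h>

    const uintptr_t kSmiTagMask   = 1;        // smis carry a 0 tag in the low bit
    const uintptr_t kNewSpaceSize = 1 << 22;  // illustrative; must be a power of two
    uintptr_t new_space_start;                // new-space base, aligned to its size

    // Everything above the new-space size bits.
    inline uintptr_t NewSpaceMask() { return ~(kNewSpaceSize - 1); }

    // Hypothetical helper mirroring the generated code: the remembered-set
    // update is skipped for smi values and for objects in new space.
    bool NeedsRSetUpdate(uintptr_t object, uintptr_t value) {
      if ((value & kSmiTagMask) == 0) return false;  // storing a smi
      // The single lea+and above relies on new_space_start being aligned to
      // the power-of-two new-space size: (object - new_space_start) has all
      // mask bits clear exactly when object lies inside new space, and
      // addresses below the base wrap around and fail the test.
      if (((object - new_space_start) & NewSpaceMask()) == 0) return false;
      return true;
    }

When the serializer is enabled the external reference cannot be folded into the lea displacement, so the code keeps the two-step mask-and-compare variant instead.
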
@@ -192,7 +179,7 @@
     // to limit code size. We should probably evaluate this decision by
     // measuring the performance of an equivalent implementation using
     // "simpler" instructions
-    bts(Operand(object, 0), value);
+    bts(Operand(object, Page::kRSetOffset), value);
   } else {
     Register dst = scratch;
     if (offset != 0) {
@@ -201,7 +188,9 @@
       // array access: calculate the destination address in the same manner as
       // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
       // into an array of words.
-      lea(dst, Operand(object, dst, times_2,
+      ASSERT_EQ(1, kSmiTagSize);
+      ASSERT_EQ(0, kSmiTag);
+      lea(dst, Operand(object, dst, times_half_pointer_size,
                        FixedArray::kHeaderSize - kHeapObjectTag));
     }
     // If we are already generating a shared stub, not inlining the
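
For the constant-offset path, the bit set by the bts reduces to the following sketch (constants again illustrative; the real values live on Page):

    #include <stdint.h>

    const uintptr_t kPageSize          = 1 << 13;  // illustrative page size
    const uintptr_t kPageAlignmentMask = kPageSize - 1;
    const int       kPointerSizeLog2   = 2;        // 32-bit pointers
    const int       kRSetOffset        = 0;        // rset bitmap offset within a page

    // Hypothetical helper: set the remembered-set bit for the field at
    // object + offset.  There is one bit per pointer-size word in the page,
    // hence the shift by kPointerSizeLog2 (which the patch substitutes for
    // kObjectAlignmentBits; the two have the same value on ia32).
    void SetRSetBit(uintptr_t object, int offset) {
      uintptr_t bit  = ((object + offset) & kPageAlignmentMask) >> kPointerSizeLog2;
      uintptr_t page = object & ~kPageAlignmentMask;  // page containing the object
      // The generated bts does this in one instruction: set bit 'bit' of the
      // bit string starting at page + kRSetOffset.
      uint32_t* rset = reinterpret_cast<uint32_t*>(page + kRSetOffset);
      rset[bit >> 5] |= 1u << (bit & 31);
    }

In the non-constant case, dst holds a smi index; since a smi is the integer value shifted left by one (kSmiTagSize == 1, kSmiTag == 0), scaling it by times_half_pointer_size in the lea produces the byte offset of the element in the FixedArray, which is what the two new asserts document.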