Improved performance of Array.prototype.concat by moving the implementation to C++ (issue 123).

Fixed the heap growth policy to avoid growing old space to its maximum capacity before doing a garbage collection, and fixed an issue that could lead to artificial out-of-memory situations (issue 129).
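
A sketch of the new policy, for reference: the constants and formulas below mirror
the heap.cc changes in this patch, but the surrounding struct is illustrative only,
not the actual Heap implementation (there these are static fields, and the
allocation limit's consumer is outside the hunks shown here).

  // Self-contained model of the rebased old-generation limits.
  #include <algorithm>

  static const int MB = 1024 * 1024;
  static const int kMinimumPromotionLimit = 2 * MB;
  static const int kMinimumAllocationLimit = 8 * MB;

  struct OldGenLimits {
    int promotion_limit = kMinimumPromotionLimit;    // old_gen_promotion_limit_
    int allocation_limit = kMinimumAllocationLimit;  // old_gen_allocation_limit_

    // After each mark-compact, rebase both limits on the surviving
    // old-generation size: grow by at least the minimum, or by a third.
    void RebaseAfterMarkCompact(int old_gen_size) {
      promotion_limit = old_gen_size +
          std::max(kMinimumPromotionLimit, old_gen_size / 3);
      allocation_limit = old_gen_size +
          std::max(kMinimumAllocationLimit, old_gen_size / 3);
    }

    // Counterpart of Heap::OldGenerationPromotionLimitReached(): once this
    // much data (plus external allocations) has been promoted, the next GC
    // is a full mark-compact rather than a scavenge.
    bool PromotionLimitReached(int promoted_size, int external_size) const {
      return promoted_size + external_size > promotion_limit;
    }
  };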

Fixed Date.prototype.toLocaleDateString to return the date in the same format as WebKit.

Added missing initialization checks to the debugger API.

Added removal of unused maps during GC.


git-svn-id: http://v8.googlecode.com/svn/trunk@655 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/heap.cc b/src/heap.cc
index 14c1940..bb30deb 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -64,7 +64,12 @@
 MapSpace* Heap::map_space_ = NULL;
 LargeObjectSpace* Heap::lo_space_ = NULL;
 
-int Heap::promoted_space_limit_ = 0;
+static const int kMinimumPromotionLimit = 2*MB;
+static const int kMinimumAllocationLimit = 8*MB;
+
+int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
+int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
+
 int Heap::old_gen_exhausted_ = false;
 
 int Heap::amount_of_external_allocated_memory_ = 0;
@@ -91,6 +96,8 @@
 int Heap::mc_count_ = 0;
 int Heap::gc_count_ = 0;
 
+int Heap::always_allocate_scope_depth_ = 0;
+
 #ifdef DEBUG
 bool Heap::allocation_allowed_ = true;
 
@@ -138,8 +145,7 @@
   }
 
   // Is enough data promoted to justify a global GC?
-  if (PromotedSpaceSize() + PromotedExternalMemorySize()
-      > promoted_space_limit_) {
+  if (OldGenerationPromotionLimitReached()) {
     Counters::gc_compactor_caused_by_promoted_data.Increment();
     return MARK_COMPACTOR;
   }
@@ -360,9 +366,11 @@
   if (collector == MARK_COMPACTOR) {
     MarkCompact(tracer);
 
-    int promoted_space_size = PromotedSpaceSize();
-    promoted_space_limit_ =
-        promoted_space_size + Max(2 * MB, (promoted_space_size/100) * 35);
+    int old_gen_size = PromotedSpaceSize();
+    old_gen_promotion_limit_ =
+        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+    old_gen_allocation_limit_ =
+        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3);
     old_gen_exhausted_ = false;
 
     // If we have used the mark-compact collector to collect the new
@@ -1019,7 +1027,7 @@
   // spaces.
   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-  Object* result = AllocateRaw(HeapNumber::kSize, space);
+  Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
 
   HeapObject::cast(result)->set_map(heap_number_map());
@@ -1029,6 +1037,8 @@
 
 
 Object* Heap::AllocateHeapNumber(double value) {
+  // Use the general version if we're forced to always allocate.
+  if (always_allocate()) return AllocateHeapNumber(value, NOT_TENURED);
   // This version of AllocateHeapNumber is optimized for
   // allocation in new space.
   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
@@ -1525,7 +1535,7 @@
   AllocationSpace space =
       size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
 
-  Object* result = AllocateRaw(size, space);
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
 
   if (result->IsFailure()) return result;
 
@@ -1598,7 +1608,9 @@
 Object* Heap::Allocate(Map* map, AllocationSpace space) {
   ASSERT(gc_state_ == NOT_IN_GC);
   ASSERT(map->instance_type() != MAP_TYPE);
-  Object* result = AllocateRaw(map->instance_size(), space);
+  Object* result = AllocateRaw(map->instance_size(),
+                               space,
+                               TargetSpaceId(map->instance_type()));
   if (result->IsFailure()) return result;
   HeapObject::cast(result)->set_map(map);
   return result;
@@ -1658,19 +1670,19 @@
   // Make the clone.
   Map* map = boilerplate->map();
   int object_size = map->instance_size();
-  Object* result = new_space_.AllocateRaw(object_size);
+  Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
   if (result->IsFailure()) return result;
-  ASSERT(Heap::InNewSpace(result));
 
-  // Copy the content.
+  // Copy the content. The arguments boilerplate doesn't have any
+  // fields that point to new space so it's safe to skip the write
+  // barrier here.
   CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
             reinterpret_cast<Object**>(boilerplate->address()),
             object_size);
 
   // Set the two properties.
   JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
-                                                callee,
-                                                SKIP_WRITE_BARRIER);
+                                                callee);
   JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
                                                 Smi::FromInt(length),
                                                 SKIP_WRITE_BARRIER);
@@ -1778,14 +1790,33 @@
   // Make the clone.
   Map* map = source->map();
   int object_size = map->instance_size();
-  Object* clone = new_space_.AllocateRaw(object_size);
-  if (clone->IsFailure()) return clone;
-  ASSERT(Heap::InNewSpace(clone));
+  Object* clone;
 
-  // Copy the content.
-  CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
-            reinterpret_cast<Object**>(source->address()),
-            object_size);
+  // If we're forced to always allocate, we use the general allocation
+  // functions which may leave us with an object in old space.
+  if (always_allocate()) {
+    clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+    if (clone->IsFailure()) return clone;
+    Address clone_address = HeapObject::cast(clone)->address();
+    CopyBlock(reinterpret_cast<Object**>(clone_address),
+              reinterpret_cast<Object**>(source->address()),
+              object_size);
+    // Update write barrier for all fields that lie beyond the header.
+    for (int offset = JSObject::kHeaderSize;
+         offset < object_size;
+         offset += kPointerSize) {
+      RecordWrite(clone_address, offset);
+    }
+  } else {
+    clone = new_space_.AllocateRaw(object_size);
+    if (clone->IsFailure()) return clone;
+    ASSERT(Heap::InNewSpace(clone));
+    // Since we know the clone is allocated in new space, we can copy
+    // the contents without worrying about updating the write barrier.
+    CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
+              reinterpret_cast<Object**>(source->address()),
+              object_size);
+  }
 
   FixedArray* elements = FixedArray::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
@@ -2007,7 +2038,7 @@
   // Allocate string.
   AllocationSpace space =
       (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
-  Object* result = AllocateRaw(size, space);
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
 
   reinterpret_cast<HeapObject*>(result)->set_map(map);
@@ -2033,7 +2064,7 @@
 
   // Use AllocateRaw rather than Allocate because the object's size cannot be
   // determined from the map.
-  Object* result = AllocateRaw(size, space);
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
 
   // Determine the map based on the string's length.
@@ -2063,7 +2094,7 @@
 
   // Use AllocateRaw rather than Allocate because the object's size cannot be
   // determined from the map.
-  Object* result = AllocateRaw(size, space);
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
 
   // Determine the map based on the string's length.
@@ -2086,7 +2117,7 @@
 
 Object* Heap::AllocateEmptyFixedArray() {
   int size = FixedArray::SizeFor(0);
-  Object* result = AllocateRaw(size, OLD_DATA_SPACE);
+  Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
   // Initialize the object.
   reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
@@ -2096,6 +2127,8 @@
 
 
 Object* Heap::AllocateRawFixedArray(int length) {
+  // Use the general function if we're forced to always allocate.
+  if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
   // Allocate the raw data for a fixed array.
   int size = FixedArray::SizeFor(length);
   return (size > MaxHeapObjectSize())
@@ -2153,7 +2186,7 @@
   } else {
     AllocationSpace space =
         (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
-    result = AllocateRaw(size, space);
+    result = AllocateRaw(size, space, OLD_POINTER_SPACE);
   }
   if (result->IsFailure()) return result;
 
@@ -2291,7 +2324,8 @@
   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
          title, gc_count_);
   PrintF("mark-compact GC : %d\n", mc_count_);
-  PrintF("promoted_space_limit_ %d\n", promoted_space_limit_);
+  PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
+  PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
 
   PrintF("\n");
   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());