Push version 1.3.17 to trunk.

Added API method to get simple heap statistics.
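
For reference, a minimal sketch of how the new statistics entry point might be
used by an embedder, assuming it is exposed as v8::V8::GetHeapStatistics() and
that v8::HeapStatistics carries total_heap_size()/used_heap_size() accessors
(these names are assumptions for illustration, not confirmed by this patch):

  #include <cstdio>
  #include <v8.h>

  // Query overall heap usage and print it.  The static GetHeapStatistics()
  // entry point and the accessor names are assumed API surface.
  void PrintHeapUsage() {
    v8::HeapStatistics stats;
    v8::V8::GetHeapStatistics(&stats);
    std::printf("heap: %lu bytes committed, %lu bytes in use\n",
                static_cast<unsigned long>(stats.total_heap_size()),
                static_cast<unsigned long>(stats.used_heap_size()));
  }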

Improved heap profiler support.

Fixed the implementation of the resource constraint API so it works when using snapshots.
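
A rough sketch of how the resource constraint API is typically applied from an
embedder, assuming the setters are named set_max_young_space_size() and
set_max_old_space_size(), that they take byte values, and that they are
installed through v8::SetResourceConstraints() before the first context is
created (all of these names and units are assumptions for illustration):

  #include <v8.h>

  // Apply heap limits before V8 sets up its spaces.  Setter names, the
  // SetResourceConstraints() entry point, and the byte units are assumed.
  void ConfigureHeapLimits() {
    v8::ResourceConstraints constraints;
    constraints.set_max_young_space_size(2 * 1024 * 1024);
    constraints.set_max_old_space_size(64 * 1024 * 1024);
    v8::SetResourceConstraints(&constraints);
  }

In the patch below, distinguishing Heap::MaxSemiSpaceSize() from
Heap::ReservedSemiSpaceSize() in the new-space setup appears to be the part of
this fix that lets the configured limits coexist with a snapshot-based startup.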

Fixed a number of issues in the Windows 64-bit version.

Optimized calls to API getters.

Added Valgrind notification on code modification to the 64-bit version.

Fixed an issue where we logged shared library addresses on Windows at startup but never used them.



git-svn-id: http://v8.googlecode.com/svn/trunk@3167 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/spaces.cc b/src/spaces.cc
index 43abaa4..c69579a 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -982,7 +982,7 @@
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
-  int maximum_semispace_capacity = Heap::SemiSpaceSize();
+  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();
 
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -998,7 +998,7 @@
 #undef SET_NAME
 #endif
 
-  ASSERT(size == 2 * maximum_semispace_capacity);
+  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
   ASSERT(IsAddressAligned(start, size, 0));
 
   if (!to_space_.Setup(start,
@@ -1527,7 +1527,9 @@
   // correct size.
   if (size_in_bytes > ByteArray::kAlignedSize) {
     set_map(Heap::raw_unchecked_byte_array_map());
-    ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
+    // Can't use ByteArray::cast because it fails during deserialization.
+    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
+    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
   } else if (size_in_bytes == kPointerSize) {
     set_map(Heap::raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
@@ -1535,13 +1537,13 @@
   } else {
     UNREACHABLE();
   }
-  ASSERT(Size() == size_in_bytes);
+  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
+  // deserialization because the byte array map is not done yet.
 }
 
 
 Address FreeListNode::next() {
-  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
-         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  ASSERT(IsFreeListNode(this));
   if (map() == Heap::raw_unchecked_byte_array_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
     return Memory::Address_at(address() + kNextOffset);
@@ -1552,8 +1554,7 @@
 
 
 void FreeListNode::set_next(Address next) {
-  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
-         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  ASSERT(IsFreeListNode(this));
   if (map() == Heap::raw_unchecked_byte_array_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
     Memory::Address_at(address() + kNextOffset) = next;
@@ -1830,13 +1831,16 @@
     return AllocateInNextPage(current_page, size_in_bytes);
   }
 
-  // There is no next page in this space.  Try free list allocation.
-  int wasted_bytes;
-  Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
-  accounting_stats_.WasteBytes(wasted_bytes);
-  if (!result->IsFailure()) {
-    accounting_stats_.AllocateBytes(size_in_bytes);
-    return HeapObject::cast(result);
+  // There is no next page in this space.  Try free list allocation unless that
+  // is currently forbidden.
+  if (!Heap::linear_allocation()) {
+    int wasted_bytes;
+    Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+    accounting_stats_.WasteBytes(wasted_bytes);
+    if (!result->IsFailure()) {
+      accounting_stats_.AllocateBytes(size_in_bytes);
+      return HeapObject::cast(result);
+    }
   }
 
   // Free list allocation failed and there is no next page.  Fail if we have
@@ -2232,10 +2236,10 @@
     return AllocateInNextPage(current_page, size_in_bytes);
   }
 
-  // There is no next page in this space.  Try free list allocation.
-  // The fixed space free list implicitly assumes that all free blocks
-  // are of the fixed size.
-  if (size_in_bytes == object_size_in_bytes_) {
+  // There is no next page in this space.  Try free list allocation unless
+  // that is currently forbidden.  The fixed space free list implicitly assumes
+  // that all free blocks are of the fixed size.
+  if (!Heap::linear_allocation()) {
     Object* result = free_list_.Allocate();
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);