Merge V8 3.9 at 3.9.24.9

http://v8.googlecode.com/svn/branches/3.9@11260

Bug: 5688872
Change-Id: Iddd944e82189d92df3fc427dc5f0d3f1b2f0c6c8
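
For src/spaces.cc this merge picks up four changes: MemoryAllocator::Free
now reports freed pages to Heap::RememberUnmappedPage() before deleting
their slots buffer and skip list; the code-page guard calculations cast
OS::CommitPageSize() explicitly to int; LargeObjectSpace::ReserveSpace()
now requires available old-generation capacity and, when incremental
marking is stopped, available old-generation space as well; and
LargeObjectSpace gains chunk_map_, a HashMap keyed by
MemoryChunk::kAlignment-sized address slices, so FindObject() and the new
FindPage() replace the linear page walks (including the removed
FindPageContainingPc()) with a single hash lookup.

The key derivation used by chunk_map_ is easy to show in isolation. The
sketch below is illustrative only and not part of the patch; kAlignment,
page, and size are invented placeholders (the real granularity is
MemoryChunk::kAlignment):

    // Standalone sketch; kAlignment, page, and size are placeholders.
    #include <cstdint>
    #include <cstdio>

    static const uintptr_t kAlignment = uintptr_t(1) << 20;

    int main() {
      uintptr_t page = 5 * kAlignment;      // chunk-aligned page address
      uintptr_t size = 3 * kAlignment / 2;  // spans parts of two slices
      // Same computation as the registration loop in the patch: one key
      // per kAlignment-sized slice intersecting [page, page + size).
      uintptr_t base = page / kAlignment;
      uintptr_t limit = base + (size - 1) / kAlignment;
      for (uintptr_t key = base; key <= limit; key++)
        printf("register key %llu\n", (unsigned long long)key);
      // Any address a inside the page hashes to a / kAlignment, which is
      // one of the registered keys.
      uintptr_t a = page + size - 1;
      printf("lookup key %llu\n", (unsigned long long)(a / kAlignment));
      return 0;
    }
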
diff --git a/src/spaces.cc b/src/spaces.cc
index de738fb..defe352 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -594,6 +594,9 @@
     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
   }
 
+  isolate_->heap()->RememberUnmappedPage(
+      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
+
   delete chunk->slots_buffer();
   delete chunk->skip_list();
 
@@ -699,7 +702,7 @@
 
 
 int MemoryAllocator::CodePageGuardSize() {
-  return OS::CommitPageSize();
+  return static_cast<int>(OS::CommitPageSize());
 }
 
 
@@ -713,7 +716,7 @@
 int MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
-  return Page::kPageSize - OS::CommitPageSize();
+  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
 }
 
 
@@ -2217,7 +2220,9 @@
 // You have to call this last, since the implementation from PagedSpace
 // doesn't know that memory was 'promised' to large object space.
 bool LargeObjectSpace::ReserveSpace(int bytes) {
-  return heap()->OldGenerationSpaceAvailable() >= bytes;
+  return heap()->OldGenerationCapacityAvailable() >= bytes &&
+         (!heap()->incremental_marking()->IsStopped() ||
+          heap()->OldGenerationSpaceAvailable() >= bytes);
 }
 
 
@@ -2520,6 +2525,10 @@
 
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
+static bool ComparePointers(void* key1, void* key2) {
+  return key1 == key2;
+}
+
 
 LargeObjectSpace::LargeObjectSpace(Heap* heap,
                                    intptr_t max_capacity,
@@ -2529,7 +2538,8 @@
       first_page_(NULL),
       size_(0),
       page_count_(0),
-      objects_size_(0) {}
+      objects_size_(0),
+      chunk_map_(ComparePointers, 1024) {}
 
 
 bool LargeObjectSpace::SetUp() {
@@ -2537,6 +2547,7 @@
   size_ = 0;
   page_count_ = 0;
   objects_size_ = 0;
+  chunk_map_.Clear();
   return true;
 }
 
@@ -2580,6 +2591,18 @@
   page->set_next_page(first_page_);
   first_page_ = page;
 
+  // Register all MemoryChunk::kAlignment-aligned chunks covered by
+  // this large page in the chunk map.
+  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
+  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
+  for (uintptr_t key = base; key <= limit; key++) {
+    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                              static_cast<uint32_t>(key),
+                                              true);
+    ASSERT(entry != NULL);
+    entry->value = page;
+  }
+
   HeapObject* object = page->GetObject();
 
 #ifdef DEBUG
@@ -2596,27 +2619,25 @@
 
 // GC support
 MaybeObject* LargeObjectSpace::FindObject(Address a) {
-  for (LargePage* page = first_page_;
-       page != NULL;
-       page = page->next_page()) {
-    Address page_address = page->address();
-    if (page_address <= a && a < page_address + page->size()) {
-      return page->GetObject();
-    }
+  LargePage* page = FindPage(a);
+  if (page != NULL) {
+    return page->GetObject();
   }
   return Failure::Exception();
 }
 
 
-LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
-  // TODO(853): Change this implementation to only find executable
-  // chunks and use some kind of hash-based approach to speed it up.
-  for (LargePage* chunk = first_page_;
-       chunk != NULL;
-       chunk = chunk->next_page()) {
-    Address chunk_address = chunk->address();
-    if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
-      return chunk;
+LargePage* LargeObjectSpace::FindPage(Address a) {
+  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
+  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                        static_cast<uint32_t>(key),
+                                        false);
+  if (e != NULL) {
+    ASSERT(e->value != NULL);
+    LargePage* page = reinterpret_cast<LargePage*>(e->value);
+    ASSERT(page->is_valid());
+    if (page->Contains(a)) {
+      return page;
     }
   }
   return NULL;
@@ -2654,6 +2675,17 @@
       objects_size_ -= object->Size();
       page_count_--;
 
+      // Remove the chunk map entries registered for this page. A local
+      // alias for MemoryChunk::kAlignment keeps these lines within the
+      // 80-character limit checked by tools/presubmit.py.
+      const intptr_t alignment = MemoryChunk::kAlignment;
+      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
+      uintptr_t limit = base + (page->size() - 1) / alignment;
+      for (uintptr_t key = base; key <= limit; key++) {
+        chunk_map_.Remove(reinterpret_cast<void*>(key),
+                          static_cast<uint32_t>(key));
+      }
+
       if (is_pointer_object) {
         heap()->QueueMemoryChunkForFree(page);
       } else {