Update V8 to r6101 as required by WebKit r74534

Change-Id: I7f84af8dd732f11898fd644b2c2b1538914cb78d
diff --git a/src/spaces.h b/src/spaces.h
index 1b99c56..4f2d07b 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -609,6 +609,9 @@
     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
   }
 
+  // Checks whether addr points into one of the page chunks; never crashes.
+  static bool SafeIsInAPageChunk(Address addr);
+
   // Links two pages.
   static inline void SetNextPage(Page* prev, Page* next);
 
@@ -650,23 +653,54 @@
   static void ReportStatistics();
 #endif
 
+  static void AddToAllocatedChunks(Address addr, intptr_t size);
+  static void RemoveFromAllocatedChunks(Address addr, intptr_t size);
+  // Note: This only checks the regular chunks, not the odd-sized initial
+  // chunk.
+  static bool InAllocatedChunks(Address addr);
+
   // Due to encoding limitation, we can only have 8K chunks.
   static const int kMaxNofChunks = 1 << kPageSizeBits;
   // If a chunk has at least 16 pages, the maximum heap size is about
   // 8K * 8K * 16 = 1G bytes.
 #ifdef V8_TARGET_ARCH_X64
   static const int kPagesPerChunk = 32;
+  // On 64 bit there are 3 levels of 4096-entry tables below the top level.
+  static const int kPagesPerChunkLog2 = 5;
+  static const int kChunkTableLevels = 4;
+  static const int kChunkTableBitsPerLevel = 12;
 #else
   static const int kPagesPerChunk = 16;
+  // On 32 bit there is one 256-entry table level below the top level.
+  static const int kPagesPerChunkLog2 = 4;
+  static const int kChunkTableLevels = 2;
+  static const int kChunkTableBitsPerLevel = 8;
 #endif
-  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
 
  private:
+  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
+  static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
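+  // Number of entries in the top level chunk table: the high address bits
+  // left over after the lower table levels and the in-chunk offset. With
+  // 8 KB pages this is 1 << (64 - 18 - 3 * 12) == 1024 entries on 64 bit
+  // and 1 << (32 - 17 - 8) == 128 entries on 32 bit.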
+  static const int kChunkTableTopLevelEntries =
+      1 << (sizeof(intptr_t) * kBitsPerByte - kChunkSizeLog2 -
+          (kChunkTableLevels - 1) * kChunkTableBitsPerLevel);
+
+  // Chunks are not chunk-size aligned, so up to two chunks can cover a given
+  // chunk-sized area of memory; hence two words per bottom level entry.
+  static const int kChunkTableFineGrainedWordsPerEntry = 2;
+  static const uintptr_t kUnusedChunkTableEntry = 0;
+
   // Maximum space size in bytes.
   static intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
   static intptr_t capacity_executable_;
 
+  // Top level table to track whether memory is part of a chunk or not.
+  static uintptr_t chunk_table_[kChunkTableTopLevelEntries];
+
   // Allocated space size in bytes.
   static intptr_t size_;
   // Allocated executable space size in bytes.
@@ -725,6 +759,33 @@
   // Frees a chunk.
   static void DeleteChunk(int chunk_id);
 
+  // Helpers to maintain and query the chunk tables.
+  static void AddChunkUsingAddress(
+      uintptr_t chunk_start,        // Where the chunk starts.
+      uintptr_t chunk_index_base);  // Used to place the chunk in the tables.
+  static void RemoveChunkFoundUsingAddress(
+      uintptr_t chunk_start,        // Where the chunk starts.
+      uintptr_t chunk_index_base);  // Used to locate the entry in the tables.
+  // Controls whether the lookup creates intermediate levels of tables as
+  // needed.
+  enum CreateTables { kDontCreateTables, kCreateTablesAsNeeded };
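+  // Walks the chunk tables to find the entry covering the given address;
+  // bit_position identifies the current table level, and intermediate
+  // tables are only created when create_as_needed allows it.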
+  static uintptr_t* AllocatedChunksFinder(uintptr_t* table,
+                                          uintptr_t address,
+                                          int bit_position,
+                                          CreateTables create_as_needed);
+  static void FreeChunkTables(uintptr_t* array, int length, int level);
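+  // Returns the index of the first of the two words for the given address
+  // in a bottom level (fine grained) chunk table.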
+  static int FineGrainedIndexForAddress(uintptr_t address) {
+    int index = ((address >> kChunkSizeLog2) &
+        ((1 << kChunkTableBitsPerLevel) - 1));
+    return index * kChunkTableFineGrainedWordsPerEntry;
+  }
+
+
   // Basic check whether a chunk id is in the valid range.
   static inline bool IsValidChunkId(int chunk_id);
 
@@ -1019,6 +1080,8 @@
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
   bool Contains(HeapObject* o) { return Contains(o->address()); }
+  // Never crashes even if a is not a valid pointer.
+  inline bool SafeContains(Address a);
 
   // Given an address occupied by a live object, return that object if it is
   // in this space, or Failure::Exception() if it is not. The implementation
@@ -1588,6 +1651,11 @@
 
   virtual bool ReserveSpace(int bytes);
 
+  // Shrinks a sequential string, which must be the most recent thing that
+  // was allocated in new space, to the given length len.
+  template <typename StringType>
+  inline void ShrinkStringAtAllocationBoundary(String* string, int len);
+
 #ifdef ENABLE_HEAP_PROTECTION
   // Protect/unprotect the space by marking it read-only/writable.
   virtual void Protect();
@@ -2062,12 +2130,6 @@
     accounting_stats_.DeallocateBytes(accounting_stats_.Size());
     accounting_stats_.AllocateBytes(new_size);
 
-    // Flush allocation watermarks.
-    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
-      p->SetAllocationWatermark(p->AllocationTop());
-    }
-    top_page->SetAllocationWatermark(new_top);
-
 #ifdef DEBUG
     if (FLAG_enable_slow_asserts) {
       intptr_t actual_size = 0;
@@ -2138,10 +2200,10 @@
   // Allocates a new LargeObjectChunk that contains a large object page
   // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
   // object) bytes after the object area start of that page.
-  // The allocated chunk size is set in the output parameter chunk_size.
-  static LargeObjectChunk* New(int size_in_bytes,
-                               size_t* chunk_size,
-                               Executability executable);
+  static LargeObjectChunk* New(int size_in_bytes, Executability executable);
+
+  // Free the memory associated with the chunk.
+  inline void Free(Executability executable);
 
   // Interpret a raw address as a large object chunk.
   static LargeObjectChunk* FromAddress(Address address) {
@@ -2154,12 +2216,13 @@
   // Accessors for the fields of the chunk.
   LargeObjectChunk* next() { return next_; }
   void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
-
   size_t size() { return size_ & ~Page::kPageFlagMask; }
-  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
+
+  // Returns the address at which the object in this chunk starts.
+  inline Address GetStartAddress();
 
   // Returns the object in this chunk.
-  inline HeapObject* GetObject();
+  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
 
   // Given a requested size returns the physical size of a chunk to be
   // allocated.
@@ -2176,7 +2239,7 @@
   // A pointer to the next large object chunk in the space or NULL.
   LargeObjectChunk* next_;
 
-  // The size of this chunk.
+  // The total size of this chunk.
   size_t size_;
 
  public: