Push version 2.0.0 to trunk.

Added support for VFP on ARM.

Added TryCatch::ReThrow method to the API.
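
A minimal sketch of how the new call can be used from a C++ callback;
everything here other than the v8 API names is hypothetical, and the
pre-3.x callback style of this era is assumed:

    // Hedged sketch: propagate a caught exception to the enclosing
    // TryCatch instead of handling it here.  ForwardingCallback and
    // RunNestedWork are hypothetical names for illustration.
    #include <v8.h>

    v8::Handle<v8::Value> RunNestedWork(const v8::Arguments& args);  // hypothetical

    v8::Handle<v8::Value> ForwardingCallback(const v8::Arguments& args) {
      v8::TryCatch try_catch;
      v8::Handle<v8::Value> result = RunNestedWork(args);
      if (try_catch.HasCaught()) {
        // ReThrow() rethrows the pending exception so that the next
        // enclosing TryCatch (or the caller) sees it, rather than this
        // same TryCatch catching it again; return its result directly.
        return try_catch.ReThrow();
      }
      return result;
    }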

Reduced the size of snapshots and improved the snapshot load time.

Improved heap profiler support.

64-bit version now supported on Windows; the spaces.cc casts in the diff below are part of this cleanup.

Fixed a number of debugger issues.

Fixed various other bugs.


git-svn-id: http://v8.googlecode.com/svn/trunk@3333 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
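
The spaces.cc diff below belongs to the 64-bit cleanup: size_t values and
Address/byte* differences are 64 bits wide on x64, so storing them in the
int-typed accounting fields now goes through an explicit static_cast<int>,
which makes the narrowing deliberate and silences MSVC's x64 truncation
warnings (C4267/C4244).  A self-contained sketch of the pattern, with
hypothetical names (a note on the PagesInChunk shift follows the diff):

    // Hypothetical illustration of the narrowing-cast pattern applied
    // throughout spaces.cc; Accounting is not a real V8 type.
    #include <cstddef>

    struct Accounting {
      int size_;  // byte counts stay int, as in MemoryAllocator

      void Add(size_t allocated) {
        // size_t is 64-bit on x64: cast explicitly rather than rely on
        // an implicit, warning-generating conversion.
        size_ += static_cast<int>(allocated);
      }

      void AddRange(const char* start, const char* end) {
        // Pointer subtraction yields ptrdiff_t, also 64-bit on x64.
        size_ += static_cast<int>(end - start);
      }
    };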
diff --git a/src/spaces.cc b/src/spaces.cc
index c69579a..f3b6b9f 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -354,7 +354,7 @@
   } else {
     mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
   }
-  int alloced = *allocated;
+  int alloced = static_cast<int>(*allocated);
   size_ += alloced;
   Counters::memory_allocated.Increment(alloced);
   return mem;
@@ -367,8 +367,8 @@
   } else {
     OS::Free(mem, length);
   }
-  Counters::memory_allocated.Decrement(length);
-  size_ -= length;
+  Counters::memory_allocated.Decrement(static_cast<int>(length));
+  size_ -= static_cast<int>(length);
   ASSERT(size_ >= 0);
 }
 
@@ -387,7 +387,7 @@
   // We are sure that we have mapped a block of requested addresses.
   ASSERT(initial_chunk_->size() == requested);
   LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
-  size_ += requested;
+  size_ += static_cast<int>(requested);
   return initial_chunk_->address();
 }
 
@@ -397,8 +397,8 @@
   // and the last page ends on the last page-aligned address before
   // start+size.  Page::kPageSize is a power of two so we can divide by
   // shifting.
-  return (RoundDown(start + size, Page::kPageSize)
-          - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
+  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
+      - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits);
 }
 
 
@@ -412,7 +412,7 @@
   if (size_ + static_cast<int>(chunk_size) > capacity_) {
     // Request as many pages as we can.
     chunk_size = capacity_ - size_;
-    requested_pages = chunk_size >> Page::kPageSizeBits;
+    requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits);
 
     if (requested_pages <= 0) return Page::FromAddress(NULL);
   }
@@ -445,7 +445,7 @@
   if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
     return Page::FromAddress(NULL);
   }
-  Counters::memory_allocated.Increment(size);
+  Counters::memory_allocated.Increment(static_cast<int>(size));
 
   // So long as we correctly overestimated the number of chunks we should not
   // run out of chunk ids.
@@ -466,7 +466,7 @@
   ASSERT(InInitialChunk(start + size - 1));
 
   if (!initial_chunk_->Commit(start, size, executable)) return false;
-  Counters::memory_allocated.Increment(size);
+  Counters::memory_allocated.Increment(static_cast<int>(size));
   return true;
 }
 
@@ -478,7 +478,7 @@
   ASSERT(InInitialChunk(start + size - 1));
 
   if (!initial_chunk_->Uncommit(start, size)) return false;
-  Counters::memory_allocated.Decrement(size);
+  Counters::memory_allocated.Decrement(static_cast<int>(size));
   return true;
 }
 
@@ -558,7 +558,7 @@
     // TODO(1240712): VirtualMemory::Uncommit has a return value which
     // is ignored here.
     initial_chunk_->Uncommit(c.address(), c.size());
-    Counters::memory_allocated.Decrement(c.size());
+    Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
   } else {
     LOG(DeleteEvent("PagedChunk", c.address()));
     FreeRawMemory(c.address(), c.size());
@@ -1096,7 +1096,8 @@
 
 void NewSpace::Shrink() {
   int new_capacity = Max(InitialCapacity(), 2 * Size());
-  int rounded_new_capacity = RoundUp(new_capacity, OS::AllocateAlignment());
+  int rounded_new_capacity =
+      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
   if (rounded_new_capacity < Capacity() &&
       to_space_.ShrinkTo(rounded_new_capacity))  {
     // Only shrink from space if we managed to shrink to space.
@@ -1234,7 +1235,7 @@
 bool SemiSpace::Grow() {
   // Double the semispace size but only up to maximum capacity.
   int maximum_extra = maximum_capacity_ - capacity_;
-  int extra = Min(RoundUp(capacity_, OS::AllocateAlignment()),
+  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
                   maximum_extra);
   if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
     return false;
@@ -1797,12 +1798,14 @@
   while (it.has_next()) {
     Page* p = it.next();
     // Space below the relocation pointer is allocated.
-    computed_size += p->mc_relocation_top - p->ObjectAreaStart();
+    computed_size +=
+        static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
     if (it.has_next()) {
       // Free the space at the top of the page.  We cannot use
       // p->mc_relocation_top after the call to Free (because Free will clear
       // remembered set bits).
-      int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
+      int extra_size =
+          static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
       if (extra_size > 0) {
         int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
         // The bytes we have just "freed" to add to the free list were
@@ -1868,7 +1871,8 @@
                                          int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
   // Add the block at the top of this page to the free list.
-  int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
+  int free_size =
+      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
   if (free_size > 0) {
     int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
     accounting_stats_.WasteBytes(wasted_bytes);
@@ -1968,7 +1972,7 @@
     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
       const char* const txt =
           reinterpret_cast<const char*>(it->rinfo()->data());
-      flat_delta += it->rinfo()->pc() - prev_pc;
+      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
       if (txt[0] == ']') break;  // End of nested  comment
       // A new comment
       CollectCommentStatistics(it);
@@ -1996,7 +2000,7 @@
       const byte* prev_pc = code->instruction_start();
       while (!it.done()) {
         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
-          delta += it.rinfo()->pc() - prev_pc;
+          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
           CollectCommentStatistics(&it);
           prev_pc = it.rinfo()->pc();
         }
@@ -2005,7 +2009,7 @@
 
       ASSERT(code->instruction_start() <= prev_pc &&
              prev_pc <= code->relocation_start());
-      delta += code->relocation_start() - prev_pc;
+      delta += static_cast<int>(code->relocation_start() - prev_pc);
       EnterComment("NoComment", delta);
     }
   }
@@ -2034,7 +2038,8 @@
       int rset = Memory::int_at(rset_addr);
       if (rset != 0) {
         // Bits were set
-        int intoff = rset_addr - p->address() - Page::kRSetOffset;
+        int intoff =
+            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
         int bitoff = 0;
         for (; bitoff < kBitsPerInt; ++bitoff) {
           if ((rset & (1 << bitoff)) != 0) {
@@ -2211,9 +2216,10 @@
   while (it.has_next()) {
     Page* page = it.next();
     Address page_top = page->AllocationTop();
-    computed_size += page_top - page->ObjectAreaStart();
+    computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
     if (it.has_next()) {
-      accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
+      accounting_stats_.WasteBytes(
+          static_cast<int>(page->ObjectAreaEnd() - page_top));
     }
   }
 
@@ -2299,7 +2305,8 @@
       int rset = Memory::int_at(rset_addr);
       if (rset != 0) {
         // Bits were set
-        int intoff = rset_addr - p->address() - Page::kRSetOffset;
+        int intoff =
+            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
         int bitoff = 0;
         for (; bitoff < kBitsPerInt; ++bitoff) {
           if ((rset & (1 << bitoff)) != 0) {
@@ -2420,7 +2427,7 @@
 
 
 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
-  int os_alignment = OS::AllocateAlignment();
+  int os_alignment = static_cast<int>(OS::AllocateAlignment());
   if (os_alignment < Page::kPageSize)
     size_in_bytes += (Page::kPageSize - os_alignment);
   return size_in_bytes + Page::kObjectStartOffset;
@@ -2499,7 +2506,7 @@
     return Failure::RetryAfterGC(requested_size, identity());
   }
 
-  size_ += chunk_size;
+  size_ += static_cast<int>(chunk_size);
   page_count_++;
   chunk->set_next(first_chunk_);
   chunk->set_size(chunk_size);
@@ -2650,7 +2657,7 @@
       if (object->IsCode()) {
         LOG(CodeDeleteEvent(object->address()));
       }
-      size_ -= chunk_size;
+      size_ -= static_cast<int>(chunk_size);
       page_count_--;
       MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
       LOG(DeleteEvent("LargeObjectChunk", chunk_address));
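
A closing note on the PagesInChunk hunk above: as its comment says, both
rounded addresses are Page::kPageSize-aligned, so their difference is an
exact multiple of the page size and the division reduces to a shift.  A
standalone sketch; the constants and sample values are assumptions for
illustration, not taken from the diff:

    // Standalone illustration of the PagesInChunk arithmetic.
    #include <cstdint>
    #include <cstdio>

    const int kPageSizeBits = 13;  // assumed 8K pages
    const intptr_t kPageSize = static_cast<intptr_t>(1) << kPageSizeBits;

    intptr_t RoundDown(intptr_t x, intptr_t m) { return x & -m; }
    intptr_t RoundUp(intptr_t x, intptr_t m) { return RoundDown(x + m - 1, m); }

    int PagesIn(intptr_t start, intptr_t size) {
      // The first page starts at the first aligned address at or after
      // start; the last ends at the last aligned address at or before
      // start + size.
      return static_cast<int>(
          (RoundDown(start + size, kPageSize) - RoundUp(start, kPageSize))
          >> kPageSizeBits);
    }

    int main() {
      printf("%d\n", PagesIn(100, 5 * kPageSize));  // 4: partial ends are lost
      return 0;
    }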