2010-04-26: Version 2.2.5
Various performance improvements (especially for ARM and x64).
Fixed a bug in CPU profiling (http://crbug.com/42137).
Fixed a bug with the natives cache.
Fixed two bugs in the ARM code generator that could cause incorrect calculations.
Fixed a bug that could cause incorrect results for shift operations.
git-svn-id: http://v8.googlecode.com/svn/trunk@4498 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/spaces.cc b/src/spaces.cc
index 5f191ed..6b6d926 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -524,7 +524,7 @@
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
- p->is_normal_page = 1;
+ p->SetIsLargeObjectPage(false);
page_addr += Page::kPageSize;
}
@@ -568,6 +568,15 @@
}
+void MemoryAllocator::FreeAllPages(PagedSpace* space) {
+ for (int i = 0, length = chunks_.length(); i < length; i++) {
+ if (chunks_[i].owner() == space) {
+ DeleteChunk(i);
+ }
+ }
+}
+
+
void MemoryAllocator::DeleteChunk(int chunk_id) {
ASSERT(IsValidChunk(chunk_id));
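
The new FreeAllPages frees every chunk a space owns by scanning the chunk table, instead of walking the space's page list as the old FreePages path did, so it stays correct even when the page list is no longer chunk-ordered. The loop can cache `length` and delete by index only if DeleteChunk releases a slot in place rather than compacting the table. A minimal standalone sketch of that contract, with all names hypothetical:

    #include <cstddef>
    #include <vector>

    struct ChunkSlot {
      void* owner;  // the space that owns this chunk, or NULL if the slot is free
    };

    // Releases a slot in place. Because the table is never compacted, indices
    // of the remaining slots stay stable while a caller iterates over them.
    static void DeleteSlot(std::vector<ChunkSlot>* table, size_t i) {
      (*table)[i].owner = NULL;  // the slot can be recycled later
    }

    // Mirrors the shape of FreeAllPages: one pass, deleting owned slots by index.
    static void FreeAllOwnedBy(std::vector<ChunkSlot>* table, void* space) {
      for (size_t i = 0, length = table->size(); i < length; i++) {
        if ((*table)[i].owner == space) DeleteSlot(table, i);
      }
    }
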
@@ -622,6 +631,74 @@
#endif
+void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
+ Page** first_page,
+ Page** last_page,
+ Page** last_page_in_use) {
+ Page* first = NULL;
+ Page* last = NULL;
+
+ for (int i = 0, length = chunks_.length(); i < length; i++) {
+ ChunkInfo& chunk = chunks_[i];
+
+ if (chunk.owner() == space) {
+ if (first == NULL) {
+ Address low = RoundUp(chunk.address(), Page::kPageSize);
+ first = Page::FromAddress(low);
+ }
+ last = RelinkPagesInChunk(i,
+ chunk.address(),
+ chunk.size(),
+ last,
+ last_page_in_use);
+ }
+ }
+
+ if (first_page != NULL) {
+ *first_page = first;
+ }
+
+ if (last_page != NULL) {
+ *last_page = last;
+ }
+}
+
+
+Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
+ Address chunk_start,
+ size_t chunk_size,
+ Page* prev,
+ Page** last_page_in_use) {
+ Address page_addr = RoundUp(chunk_start, Page::kPageSize);
+ int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
+
+ if (prev->is_valid()) {
+ SetNextPage(prev, Page::FromAddress(page_addr));
+ }
+
+ for (int i = 0; i < pages_in_chunk; i++) {
+ Page* p = Page::FromAddress(page_addr);
+ p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+ page_addr += Page::kPageSize;
+
+ if (p->WasInUseBeforeMC()) {
+ *last_page_in_use = p;
+ }
+ }
+
+ // Set the next-page link of the last page in the chunk to NULL.
+ Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+ last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+ if (last_page->WasInUseBeforeMC()) {
+ *last_page_in_use = last_page;
+ }
+
+ return last_page;
+}
+
+
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
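
The opaque_header writes in the hunk above pack two values into one word: the address of the next page (page-aligned, so its low bits are zero) OR-ed with the chunk id. A self-contained sketch of that encoding, assuming an 8K page size and a mask of kPageSize - 1; the real accessors live in spaces.h/spaces-inl.h and may differ in detail:

    #include <stdint.h>

    // Illustration only; all names here are hypothetical.
    typedef uint8_t* Address;
    static const intptr_t kPageSize = 1 << 13;  // assumed page size
    static const intptr_t kPageAlignmentMask = kPageSize - 1;

    // Pack a page-aligned next-page address together with a small chunk id.
    static intptr_t PackHeader(Address next_page, int chunk_id) {
      return reinterpret_cast<intptr_t>(next_page) | chunk_id;
    }

    static Address NextPageAddress(intptr_t header) {
      // High bits: the next page's address (0 means "no next page").
      return reinterpret_cast<Address>(header & ~kPageAlignmentMask);
    }

    static int ChunkId(intptr_t header) {
      // Low bits: the id of the chunk this page belongs to.
      return static_cast<int>(header & kPageAlignmentMask);
    }
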
@@ -677,6 +754,8 @@
// Use first_page_ for allocation.
SetAllocationInfo(&allocation_info_, first_page_);
+ page_list_is_chunk_ordered_ = true;
+
return true;
}
@@ -687,9 +766,8 @@
void PagedSpace::TearDown() {
- first_page_ = MemoryAllocator::FreePages(first_page_);
- ASSERT(!first_page_->is_valid());
-
+ MemoryAllocator::FreeAllPages(this);
+ first_page_ = NULL;
accounting_stats_.Clear();
}
@@ -874,6 +952,12 @@
void PagedSpace::Shrink() {
+ if (!page_list_is_chunk_ordered_) {
+ // We cannot shrink the space if the page list is not chunk-ordered
+ // (see the comment on class MemoryAllocator for the definition).
+ return;
+ }
+
// Release half of free pages.
Page* top_page = AllocationTopPage();
ASSERT(top_page->is_valid());
@@ -955,7 +1039,7 @@
// The next page will be above the allocation top.
above_allocation_top = true;
} else {
- ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
+ ASSERT(top == PageAllocationLimit(current_page));
}
// It should be packed with objects from the bottom to the top.
@@ -1363,7 +1447,7 @@
static void ReportCodeKindStatistics() {
- const char* table[Code::NUMBER_OF_KINDS];
+ const char* table[Code::NUMBER_OF_KINDS] = { NULL };
#define CASE(name) \
case Code::name: table[Code::name] = #name; \
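
The `= { NULL }` initializer matters because the table is only partially filled in by the CASE macros: brace-initializing the first element value-initializes all the rest, so any Code kind without a CASE reads as NULL instead of an indeterminate pointer. A standalone illustration:

    #include <cstdio>

    int main() {
      const char* table[4] = { NULL };   // remaining elements are zeroed too
      table[1] = "STUB";                 // only some slots get filled in
      for (int i = 0; i < 4; i++) {
        std::printf("%d: %s\n", i, table[i] != NULL ? table[i] : "(unset)");
      }
      return 0;
    }
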
@@ -1782,6 +1866,9 @@
// OldSpace implementation
void OldSpace::PrepareForMarkCompact(bool will_compact) {
+ // Call the superclass's PrepareForMarkCompact first.
+ PagedSpace::PrepareForMarkCompact(will_compact);
+
if (will_compact) {
// Reset relocation info. During a compacting collection, everything in
// the space is considered 'available' and we will rediscover live data
@@ -1852,6 +1939,112 @@
}
+void PagedSpace::FreePages(Page* prev, Page* last) {
+ if (last == AllocationTopPage()) {
+ // The pages are already at the end of the used part of the list.
+ return;
+ }
+
+ Page* first = NULL;
+
+ // Remove pages from the list.
+ if (prev == NULL) {
+ first = first_page_;
+ first_page_ = last->next_page();
+ } else {
+ first = prev->next_page();
+ MemoryAllocator::SetNextPage(prev, last->next_page());
+ }
+
+ // Attach the removed run of pages after the last page.
+ MemoryAllocator::SetNextPage(last_page_, first);
+ last_page_ = last;
+ MemoryAllocator::SetNextPage(last, NULL);
+
+ // Clear the remembered set of each moved page.
+ do {
+ first->ClearRSet();
+ first = first->next_page();
+ } while (first != NULL);
+
+ // The order of pages in this space may no longer be consistent with
+ // the order of pages in the chunks.
+ page_list_is_chunk_ordered_ = false;
+}
+
+
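
FreePages above is a splice on the singly linked page list: cut the run ending at `last` out of the list and reattach it behind the current tail. The same operation on a generic list, independent of the V8 page types (all names hypothetical):

    #include <cstddef>

    struct Node { Node* next; };

    // Moves the run of nodes after `prev`, up to and including `last`, to the
    // end of the list. Pass prev == NULL when the run starts at the head.
    static void SpliceToTail(Node** head, Node** tail, Node* prev, Node* last) {
      if (last == *tail) return;                        // already at the end
      Node* first = (prev == NULL) ? *head : prev->next;
      if (prev == NULL) {
        *head = last->next;                             // unlink from the head
      } else {
        prev->next = last->next;                        // unlink from the middle
      }
      (*tail)->next = first;                            // reattach behind the tail
      *tail = last;
      last->next = NULL;
    }
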
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+ if (will_compact) {
+ // The mark-compact collector relies on the WAS_IN_USE_BEFORE_MC page
+ // flag to skip unused pages. Update the flag value for all pages in
+ // the space.
+ PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
+ Page* last_in_use = AllocationTopPage();
+ bool in_use = true;
+
+ while (all_pages_iterator.has_next()) {
+ Page* p = all_pages_iterator.next();
+ p->SetWasInUseBeforeMC(in_use);
+ if (p == last_in_use) {
+ // We have passed the page containing the allocation top. All
+ // subsequent pages are unused.
+ in_use = false;
+ }
+ }
+
+ if (!page_list_is_chunk_ordered_) {
+ Page* new_last_in_use = Page::FromAddress(NULL);
+ MemoryAllocator::RelinkPageListInChunkOrder(this,
+ &first_page_,
+ &last_page_,
+ &new_last_in_use);
+ ASSERT(new_last_in_use->is_valid());
+
+ if (new_last_in_use != last_in_use) {
+ // The current allocation top points to a page that is now in the
+ // middle of the page list. Move the allocation top forward to the new
+ // last used page so that object iterators continue to work properly.
+
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
+ last_in_use->AllocationTop());
+
+ if (size_in_bytes > 0) {
+ // There is still some free space left on this page. Create a
+ // filler object that occupies all of it; otherwise iterators
+ // would not be able to scan this page correctly.
+
+ Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
+ size_in_bytes);
+ }
+
+ // The new last-in-use page was in the middle of the list before
+ // sorting, so it is full.
+ SetTop(new_last_in_use->AllocationTop());
+
+ ASSERT(AllocationTopPage() == new_last_in_use);
+ ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+ }
+
+ PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+ while (pages_in_use_iterator.has_next()) {
+ Page* p = pages_in_use_iterator.next();
+ if (!p->WasInUseBeforeMC()) {
+ // An empty page sits in the middle of a sequence of used pages.
+ // Create a filler object that occupies all of its free space;
+ // otherwise iterators would not be able to scan this page correctly.
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
+ p->ObjectAreaStart());
+
+ Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
+ }
+ }
+
+ page_list_is_chunk_ordered_ = true;
+ }
+ }
+}
+
+
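
The CreateFillerObjectAt calls above are what keep pages linearly iterable: every unused gap is turned into a pseudo-object whose size can be read, so a scan can step over it object by object. A schematic of the idea only; V8's actual filler is a heap object carrying a special map pointer, not a raw size header, and SizeOfObjectAt below is hypothetical:

    #include <cstddef>

    // Schematic filler: a size-prefixed gap that a heap scan can skip over.
    struct Gap { int size_in_bytes; };

    static void CreateFillerAt(char* addr, int size_in_bytes) {
      reinterpret_cast<Gap*>(addr)->size_in_bytes = size_in_bytes;
    }

    // With every gap made self-describing, a page scan can advance uniformly:
    //   for (char* p = area_start; p < area_end; p += SizeOfObjectAt(p)) { ... }
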
bool PagedSpace::ReserveSpace(int bytes) {
Address limit = allocation_info_.limit;
Address top = allocation_info_.top;
@@ -2263,6 +2456,9 @@
// FixedSpace implementation
void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+ // Call the superclass's PrepareForMarkCompact first.
+ PagedSpace::PrepareForMarkCompact(will_compact);
+
if (will_compact) {
// Reset relocation info.
MCResetRelocationInfo();
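
Both OldSpace and FixedSpace now start their PrepareForMarkCompact with an explicitly qualified PagedSpace:: call. The qualification is what suppresses virtual dispatch; an unqualified call would re-enter the same override. A minimal illustration:

    #include <cstdio>

    struct Base {
      virtual ~Base() {}
      virtual void Prepare() { std::printf("Base::Prepare\n"); }
    };

    struct Derived : public Base {
      virtual void Prepare() {
        Base::Prepare();               // qualified: static dispatch to the base
        std::printf("Derived::Prepare\n");
      }
    };

    int main() {
      Derived d;
      Base* b = &d;
      b->Prepare();                    // prints Base::Prepare, Derived::Prepare
      return 0;
    }
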
@@ -2360,7 +2556,7 @@
HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
- ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+ ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
accounting_stats_.WasteBytes(page_extra_);
SetAllocationInfo(&allocation_info_, current_page->next_page());
@@ -2605,7 +2801,7 @@
// large object page. If the chunk_size happened to be written there, its
// low order bit should already be clear.
ASSERT((chunk_size & 0x1) == 0);
- page->is_normal_page &= ~0x1;
+ page->SetIsLargeObjectPage(true);
page->ClearRSet();
int extra_bytes = requested_size - object_size;
if (extra_bytes > 0) {
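
Both raw manipulations of is_normal_page in this diff (`p->is_normal_page = 1` and `page->is_normal_page &= ~0x1`) are replaced by SetIsLargeObjectPage, so the accessor presumably wraps that low-bit encoding: the bit is set on normal pages and clear on large object pages, which is why a chunk_size stored in the same word must already have a clear low bit. A plausible shape for the accessor, inferred from the two call sites only (the actual definition is in spaces.h):

    // Inferred sketch; not the verbatim V8 definition. The field overlaps
    // with a chunk_size in large object pages, hence the bit manipulation.
    void Page::SetIsLargeObjectPage(bool is_large_object) {
      if (is_large_object) {
        is_normal_page &= ~0x1;   // clear the low bit: large object page
      } else {
        is_normal_page |= 0x1;    // set the low bit: normal page
      }
    }
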