Version 3.7.6
Fixed filtering of store buffer for large object pages. (issue 1817)
Fixed generated hash function on all platforms. (issue 1808)
Fixed Heap::Shrink to ensure that it does not free pages that are still in use. (Chromium issue 100414)
Stability and performance improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@9968 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/heap.cc b/src/heap.cc
index 0cbe13f..c070299 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -5609,8 +5609,11 @@
void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
- for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
space->ReleaseAllUnusedPages();
+ }
}
@@ -6431,8 +6434,8 @@
// it try to perform a search in the list of pages owned by of the large
// object space and queued chunks were detached from that list.
// To work around this we split large chunk into normal kPageSize aligned
- // pieces and initialize owner field and flags of every piece.
- // If FromAnyPointerAddress encounteres a slot that belongs to one of
+  // pieces and initialize the size, owner and flags fields of every piece.
+ // If FromAnyPointerAddress encounters a slot that belongs to one of
// these smaller pieces it will treat it as a slot on a normal Page.
MemoryChunk* inner = MemoryChunk::FromAddress(
chunk->address() + Page::kPageSize);
@@ -6440,8 +6443,9 @@
chunk->address() + chunk->size() - 1);
while (inner <= inner_last) {
// Size of a large chunk is always a multiple of
- // OS::AllocationAlignment() so there is always
+ // MemoryChunk::kAlignment so there is always
// enough space for a fake MemoryChunk header.
+ inner->set_size(Page::kPageSize);
inner->set_owner(lo_space());
inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
inner = MemoryChunk::FromAddress(