// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())

intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


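// Advance to the first object on the next page, returning NULL when the
// iterator has reached the end of its range.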
HeapObject* HeapObjectIterator::FromNextPage() {
  if (cur_addr_ == end_addr_) return NULL;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  if (cur_addr_ == end_addr_) return NULL;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return FromCurrentPage();
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}


// -----------------------------------------------------------------------------
// CodeRange

List<CodeRange::FreeBlock> CodeRange::free_list_(0);
List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
int CodeRange::current_allocation_block_index_ = 0;
VirtualMemory* CodeRange::code_range_ = NULL;


bool CodeRange::Setup(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(NewEvent("CodeRange", code_range_->address(), requested));
  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


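// Find an allocation block that can hold at least 'requested' bytes: first by
// scanning the rest of the allocation list and, failing that, by merging the
// free list back into a compacted allocation list and scanning again.  Fatal
// if no large enough block exists.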
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}



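// Allocate 'requested' bytes (rounded up to whole pages) from the current
// allocation block of the code range, reporting the size actually committed
// in '*allocated'.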
void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//
int MemoryAllocator::capacity_ = 0;
int MemoryAllocator::size_ = 0;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


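// free_chunk_ids_ is used as a stack of the chunk ids that are not currently
// in use; Push and Pop maintain it.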
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(int capacity) {
  capacity_ = RoundUp(capacity, Page::kPageSize);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<int>(requested) > capacity_) return NULL;
  void* mem;
  if (executable == EXECUTABLE && CodeRange::exists()) {
    mem = CodeRange::AllocateRawMemory(requested, allocated);
  } else {
    mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
  if (CodeRange::contains(static_cast<Address>(mem))) {
    CodeRange::FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  ASSERT(size_ >= 0);
}


void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}


Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee that the desired number of pages
  // can be allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size);
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  Counters::memory_allocated.Decrement(static_cast<int>(size));
  return true;
}


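// Overwrite a block with kZapValue so that stale pointers into freed or
// uncommitted memory are easy to recognize in debug builds.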
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


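// Set up the pages of a newly allocated chunk.  Each page's opaque_header
// packs the page-aligned address of the next page together with the chunk id
// in the low-order bits; the last page of the chunk links to address 0.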
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->InvalidateWatermark(true);
    p->SetIsLargeObjectPage(false);
    p->SetAllocationWatermark(p->ObjectAreaStart());
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


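// Free, in whole chunks, the pages from 'p' to the end of the page list.  A
// chunk that also holds pages before 'p' is kept and only has its tail
// unlinked; in that case 'p' itself is returned, otherwise an invalid page.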
Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'p'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::FreeAllPages(PagedSpace* space) {
  for (int i = 0, length = chunks_.length(); i < length; i++) {
    if (chunks_[i].owner() == space) {
      DeleteChunk(i);
    }
  }
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    FreeRawMemory(c.address(), c.size());
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %d, used: %d, available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


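// Rebuild the page list of 'space' so that pages appear in chunk order,
// reporting the new first and last pages and the last page that was in use
// before mark-compact.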
void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
                                                 Page** first_page,
                                                 Page** last_page,
                                                 Page** last_page_in_use) {
  Page* first = NULL;
  Page* last = NULL;

  for (int i = 0, length = chunks_.length(); i < length; i++) {
    ChunkInfo& chunk = chunks_[i];

    if (chunk.owner() == space) {
      if (first == NULL) {
        Address low = RoundUp(chunk.address(), Page::kPageSize);
        first = Page::FromAddress(low);
      }
      last = RelinkPagesInChunk(i,
                                chunk.address(),
                                chunk.size(),
                                last,
                                last_page_in_use);
    }
  }

  if (first_page != NULL) {
    *first_page = first;
  }

  if (last_page != NULL) {
    *last_page = last;
  }
}


Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    p->InvalidateWatermark(true);
    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}



// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(int max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
                              max_capacity_ / Page::kObjectAreaSize);
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially clear region marks in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  MemoryAllocator::FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::MarkAllPagesClean() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
  }
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


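// Compute the offset of 'addr' within the space as if its pages were laid
// out contiguously in mc_page_index order.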
int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page in the page, MCSpaceOffsetForAddress considers it is in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to previous page
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
  current_page->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


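// Append up to kPagesPerChunk new pages after 'last_page', which must be the
// current last page of the space.  Returns false if the space is already at
// its maximum capacity or no memory can be allocated.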
bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear region marks of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  if (!page_list_is_chunk_ordered_) {
    // We can't shrink the space if the page list is not chunk-ordered
    // (see comment for class MemoryAllocator for definition).
    return;
  }

  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();
        // All the interior pointers should be contained in the heap, and
        // page regions covering intergenerational references should be
        // marked dirty.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * Size());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemiSpaceIterator because verification doesn't assume
1244// that it works (it depends on the invariants we are checking).
1245void NewSpace::Verify() {
1246 // The allocation pointer should be in the space or at the very end.
1247 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1248
1249 // There should be objects packed in from the low address up to the
1250 // allocation pointer.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001251 Address current = to_space_.low();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001252 while (current < top()) {
1253 HeapObject* object = HeapObject::FromAddress(current);
1254
1255 // The first word should be a map, and we expect all map pointers to
1256 // be in map space.
1257 Map* map = object->map();
1258 ASSERT(map->IsMap());
1259 ASSERT(Heap::map_space()->Contains(map));
1260
1261 // The object should not be code or a map.
1262 ASSERT(!object->IsMap());
1263 ASSERT(!object->IsCode());
1264
1265 // The object itself should look OK.
1266 object->Verify();
1267
1268 // All the interior pointers should be contained in the heap.
1269 VerifyPointersVisitor visitor;
1270 int size = object->Size();
1271 object->IterateBody(map->instance_type(), size, &visitor);
1272
1273 current += size;
1274 }
1275
1276 // The allocation pointer should not be in the middle of an object.
1277 ASSERT(current == top());
1278}
1279#endif
1280
1281
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001282bool SemiSpace::Commit() {
1283 ASSERT(!is_committed());
1284 if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
1285 return false;
1286 }
1287 committed_ = true;
1288 return true;
1289}
1290
1291
1292bool SemiSpace::Uncommit() {
1293 ASSERT(is_committed());
1294 if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
1295 return false;
1296 }
1297 committed_ = false;
1298 return true;
1299}
1300
1301
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001302// -----------------------------------------------------------------------------
1303// SemiSpace implementation
1304
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001305bool SemiSpace::Setup(Address start,
1306 int initial_capacity,
1307 int maximum_capacity) {
1308 // Creates a space in the young generation. The constructor does not
1309 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1310 // memory of size 'capacity' when set up, and does not grow or shrink
1311 // otherwise. In the mark-compact collector, the memory region of the from
1312 // space is used as the marking stack. It requires contiguous memory
1313 // addresses.
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001314 initial_capacity_ = initial_capacity;
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001315 capacity_ = initial_capacity;
1316 maximum_capacity_ = maximum_capacity;
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001317 committed_ = false;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001318
1319 start_ = start;
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001320 address_mask_ = ~(maximum_capacity - 1);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001321 object_mask_ = address_mask_ | kHeapObjectTagMask;
ager@chromium.org9085a012009-05-11 19:22:57 +00001322 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001323 age_mark_ = start_;
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001324
1325 return Commit();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001326}
1327
1328
1329void SemiSpace::TearDown() {
1330 start_ = NULL;
1331 capacity_ = 0;
1332}
1333
1334
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001335bool SemiSpace::Grow() {
sgjesse@chromium.orgc81c8942009-08-21 10:54:26 +00001336 // Double the semispace size but only up to maximum capacity.
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001337 int maximum_extra = maximum_capacity_ - capacity_;
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001338 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001339 maximum_extra);
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001340 if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
kasper.lund7276f142008-07-30 08:49:36 +00001341 return false;
1342 }
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001343 capacity_ += extra;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001344 return true;
1345}
1346
1347
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001348bool SemiSpace::GrowTo(int new_capacity) {
1349 ASSERT(new_capacity <= maximum_capacity_);
1350 ASSERT(new_capacity > capacity_);
1351 size_t delta = new_capacity - capacity_;
1352 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1353 if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
1354 return false;
1355 }
1356 capacity_ = new_capacity;
1357 return true;
1358}
1359
1360
1361bool SemiSpace::ShrinkTo(int new_capacity) {
1362 ASSERT(new_capacity >= initial_capacity_);
1363 ASSERT(new_capacity < capacity_);
1364 size_t delta = capacity_ - new_capacity;
1365 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1366 if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
1367 return false;
1368 }
1369 capacity_ = new_capacity;
1370 return true;
1371}
1372
1373
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001374#ifdef DEBUG
1375void SemiSpace::Print() { }
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001376
1377
1378void SemiSpace::Verify() { }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001379#endif
1380
1381
1382// -----------------------------------------------------------------------------
1383// SemiSpaceIterator implementation.
1384SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1385 Initialize(space, space->bottom(), space->top(), NULL);
1386}
1387
1388
1389SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1390 HeapObjectCallback size_func) {
1391 Initialize(space, space->bottom(), space->top(), size_func);
1392}
1393
1394
1395SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1396 Initialize(space, start, space->top(), NULL);
1397}
1398
1399
1400void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
1401 Address end,
1402 HeapObjectCallback size_func) {
1403 ASSERT(space->ToSpaceContains(start));
1404 ASSERT(space->ToSpaceLow() <= end
1405 && end <= space->ToSpaceHigh());
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001406 space_ = &space->to_space_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001407 current_ = start;
1408 limit_ = end;
1409 size_func_ = size_func;
1410}
1411
1412
1413#ifdef DEBUG
1414// A static array of histogram info for each type.
1415static HistogramInfo heap_histograms[LAST_TYPE+1];
1416static JSObject::SpillInformation js_spill_information;
1417
1418// heap_histograms is shared; always clear it before using it.
1419static void ClearHistograms() {
1420 // We reset the name each time, though it hasn't changed.
1421#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
1422 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1423#undef DEF_TYPE_NAME
1424
1425#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
1426 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1427#undef CLEAR_HISTOGRAM
1428
1429 js_spill_information.Clear();
1430}
1431
1432
1433static int code_kind_statistics[Code::NUMBER_OF_KINDS];
1434
1435
1436static void ClearCodeKindStatistics() {
1437 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1438 code_kind_statistics[i] = 0;
1439 }
1440}
1441
1442
1443static void ReportCodeKindStatistics() {
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001444 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001445
1446#define CASE(name) \
1447 case Code::name: table[Code::name] = #name; \
1448 break
1449
1450 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1451 switch (static_cast<Code::Kind>(i)) {
1452 CASE(FUNCTION);
1453 CASE(STUB);
1454 CASE(BUILTIN);
1455 CASE(LOAD_IC);
1456 CASE(KEYED_LOAD_IC);
1457 CASE(STORE_IC);
1458 CASE(KEYED_STORE_IC);
1459 CASE(CALL_IC);
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001460 CASE(BINARY_OP_IC);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001461 }
1462 }
1463
1464#undef CASE
1465
1466 PrintF("\n Code kind histograms: \n");
1467 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1468 if (code_kind_statistics[i] > 0) {
1469 PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
1470 }
1471 }
1472 PrintF("\n");
1473}
1474
1475
1476static int CollectHistogramInfo(HeapObject* obj) {
1477 InstanceType type = obj->map()->instance_type();
1478 ASSERT(0 <= type && type <= LAST_TYPE);
1479 ASSERT(heap_histograms[type].name() != NULL);
1480 heap_histograms[type].increment_number(1);
1481 heap_histograms[type].increment_bytes(obj->Size());
1482
1483 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1484 JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
1485 }
1486
1487 return obj->Size();
1488}
1489
1490
1491static void ReportHistogram(bool print_spill) {
1492 PrintF("\n Object Histogram:\n");
1493 for (int i = 0; i <= LAST_TYPE; i++) {
1494 if (heap_histograms[i].number() > 0) {
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001495 PrintF(" %-34s%10d (%10d bytes)\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001496 heap_histograms[i].name(),
1497 heap_histograms[i].number(),
1498 heap_histograms[i].bytes());
1499 }
1500 }
1501 PrintF("\n");
1502
1503 // Summarize string types.
1504 int string_number = 0;
1505 int string_bytes = 0;
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001506#define INCREMENT(type, size, name, camel_name) \
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001507 string_number += heap_histograms[type].number(); \
1508 string_bytes += heap_histograms[type].bytes();
1509 STRING_TYPE_LIST(INCREMENT)
1510#undef INCREMENT
1511 if (string_number > 0) {
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001512 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001513 string_bytes);
1514 }
1515
1516 if (FLAG_collect_heap_spill_statistics && print_spill) {
1517 js_spill_information.Print();
1518 }
1519}
1520#endif // DEBUG
1521
1522
1523// Support for statistics gathering for --heap-stats and --log-gc.
1524#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1525void NewSpace::ClearHistograms() {
1526 for (int i = 0; i <= LAST_TYPE; i++) {
1527 allocated_histogram_[i].clear();
1528 promoted_histogram_[i].clear();
1529 }
1530}
1531
1532// Because the copying collector does not touch garbage objects, we iterate
1533// the new space before a collection to get a histogram of allocated objects.
1534// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1535// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1536// flag is set.
1537void NewSpace::CollectStatistics() {
1538 ClearHistograms();
1539 SemiSpaceIterator it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001540 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1541 RecordAllocation(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001542}
1543
1544
1545#ifdef ENABLE_LOGGING_AND_PROFILING
1546static void DoReportStatistics(HistogramInfo* info, const char* description) {
1547 LOG(HeapSampleBeginEvent("NewSpace", description));
1548 // Lump all the string types together.
1549 int string_number = 0;
1550 int string_bytes = 0;
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001551#define INCREMENT(type, size, name, camel_name) \
1552 string_number += info[type].number(); \
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001553 string_bytes += info[type].bytes();
1554 STRING_TYPE_LIST(INCREMENT)
1555#undef INCREMENT
1556 if (string_number > 0) {
1557 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1558 }
1559
1560 // Then do the other types.
1561 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1562 if (info[i].number() > 0) {
1563 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
1564 info[i].bytes()));
1565 }
1566 }
1567 LOG(HeapSampleEndEvent("NewSpace", description));
1568}
1569#endif // ENABLE_LOGGING_AND_PROFILING
1570
1571
1572void NewSpace::ReportStatistics() {
1573#ifdef DEBUG
1574 if (FLAG_heap_stats) {
1575 float pct = static_cast<float>(Available()) / Capacity();
1576 PrintF(" capacity: %d, available: %d, %%%d\n",
1577 Capacity(), Available(), static_cast<int>(pct*100));
1578 PrintF("\n Object Histogram:\n");
1579 for (int i = 0; i <= LAST_TYPE; i++) {
1580 if (allocated_histogram_[i].number() > 0) {
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001581 PrintF(" %-34s%10d (%10d bytes)\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001582 allocated_histogram_[i].name(),
1583 allocated_histogram_[i].number(),
1584 allocated_histogram_[i].bytes());
1585 }
1586 }
1587 PrintF("\n");
1588 }
1589#endif // DEBUG
1590
1591#ifdef ENABLE_LOGGING_AND_PROFILING
1592 if (FLAG_log_gc) {
1593 DoReportStatistics(allocated_histogram_, "allocated");
1594 DoReportStatistics(promoted_histogram_, "promoted");
1595 }
1596#endif // ENABLE_LOGGING_AND_PROFILING
1597}
1598
1599
1600void NewSpace::RecordAllocation(HeapObject* obj) {
1601 InstanceType type = obj->map()->instance_type();
1602 ASSERT(0 <= type && type <= LAST_TYPE);
1603 allocated_histogram_[type].increment_number(1);
1604 allocated_histogram_[type].increment_bytes(obj->Size());
1605}
1606
1607
1608void NewSpace::RecordPromotion(HeapObject* obj) {
1609 InstanceType type = obj->map()->instance_type();
1610 ASSERT(0 <= type && type <= LAST_TYPE);
1611 promoted_histogram_[type].increment_number(1);
1612 promoted_histogram_[type].increment_bytes(obj->Size());
1613}
1614#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1615
1616
1617// -----------------------------------------------------------------------------
1618// Free-list implementation for the old object spaces
1619
1620void FreeListNode::set_size(int size_in_bytes) {
1621 ASSERT(size_in_bytes > 0);
1622 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1623
1624 // We write a map and possibly size information to the block. If the block
1625 // is big enough to be a ByteArray with at least one extra word (the next
1626 // pointer), we set its map to be the byte array map and its size to an
1627 // appropriate array length for the desired size from HeapObject::Size().
1628 // If the block is too small (e.g., one or two words) to hold both a size
1629 // field and a next pointer, we give it a filler map that gives it the
1630 // correct size.
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001631 if (size_in_bytes > ByteArray::kHeaderSize) {
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001632 set_map(Heap::raw_unchecked_byte_array_map());
ager@chromium.org3811b432009-10-28 14:53:37 +00001633 // Can't use ByteArray::cast because it fails during deserialization.
1634 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1635 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001636 } else if (size_in_bytes == kPointerSize) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001637 set_map(Heap::raw_unchecked_one_pointer_filler_map());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001638 } else if (size_in_bytes == 2 * kPointerSize) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001639 set_map(Heap::raw_unchecked_two_pointer_filler_map());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001640 } else {
1641 UNREACHABLE();
1642 }
ager@chromium.org3811b432009-10-28 14:53:37 +00001643 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1644 // deserialization because the byte array map has not been set up yet.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001645}
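
// Illustrative sketch (hypothetical values such as 'start' and
// 'free_list_head'; not part of the original file): tagging a freed 40-byte
// block on a 32-bit build so heap iterators can walk over it like any other
// object.
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(40);  // Larger than ByteArray::kHeaderSize, so the block
//                        // gets the byte array map and length
//                        // ByteArray::LengthFor(40).
//   node->set_next(free_list_head);
//
// A one- or two-word block would instead receive the one- or two-pointer
// filler map, which encodes its size implicitly.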
1646
1647
1648Address FreeListNode::next() {
ager@chromium.org3811b432009-10-28 14:53:37 +00001649 ASSERT(IsFreeListNode(this));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001650 if (map() == Heap::raw_unchecked_byte_array_map()) {
1651 ASSERT(Size() >= kNextOffset + kPointerSize);
1652 return Memory::Address_at(address() + kNextOffset);
1653 } else {
1654 return Memory::Address_at(address() + kPointerSize);
1655 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001656}
1657
1658
1659void FreeListNode::set_next(Address next) {
ager@chromium.org3811b432009-10-28 14:53:37 +00001660 ASSERT(IsFreeListNode(this));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001661 if (map() == Heap::raw_unchecked_byte_array_map()) {
1662 ASSERT(Size() >= kNextOffset + kPointerSize);
1663 Memory::Address_at(address() + kNextOffset) = next;
1664 } else {
1665 Memory::Address_at(address() + kPointerSize) = next;
1666 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001667}
1668
1669
1670OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
1671 Reset();
1672}
1673
1674
1675void OldSpaceFreeList::Reset() {
1676 available_ = 0;
1677 for (int i = 0; i < kFreeListsLength; i++) {
1678 free_[i].head_node_ = NULL;
1679 }
1680 needs_rebuild_ = false;
1681 finger_ = kHead;
1682 free_[kHead].next_size_ = kEnd;
1683}
1684
1685
1686void OldSpaceFreeList::RebuildSizeList() {
1687 ASSERT(needs_rebuild_);
1688 int cur = kHead;
1689 for (int i = cur + 1; i < kFreeListsLength; i++) {
1690 if (free_[i].head_node_ != NULL) {
1691 free_[cur].next_size_ = i;
1692 cur = i;
1693 }
1694 }
1695 free_[cur].next_size_ = kEnd;
1696 needs_rebuild_ = false;
1697}
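
// Illustrative example (hypothetical free-list state): if only free_[6] and
// free_[10] currently have head nodes, rebuilding produces the chain
//   free_[kHead].next_size_ == 6
//   free_[6].next_size_     == 10
//   free_[10].next_size_    == kEnd
// which lets Allocate() below skip every empty exact-size list during its
// best-fit search.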
1698
1699
1700int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1701#ifdef DEBUG
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001702 MemoryAllocator::ZapBlock(start, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001703#endif
1704 FreeListNode* node = FreeListNode::FromAddress(start);
1705 node->set_size(size_in_bytes);
1706
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001707 // We don't use the freelists in compacting mode. This makes it more like a
1708 // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1709 // collector.
1710 if (FLAG_always_compact) {
1711 return size_in_bytes;
1712 }
1713
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001714 // Early return to drop too-small blocks on the floor (one or two word
1715 // blocks cannot hold a map pointer, a size field, and a pointer to the
1716 // next block in the free list).
1717 if (size_in_bytes < kMinBlockSize) {
1718 return size_in_bytes;
1719 }
1720
1721 // Insert other blocks at the head of an exact free list.
1722 int index = size_in_bytes >> kPointerSizeLog2;
1723 node->set_next(free_[index].head_node_);
1724 free_[index].head_node_ = node->address();
1725 available_ += size_in_bytes;
1726 needs_rebuild_ = true;
1727 return 0;
1728}
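
// Worked example (assuming a 32-bit build, i.e. kPointerSize == 4 and
// kPointerSizeLog2 == 2): freeing a 40-byte block computes
//   index = 40 >> 2 == 10
// and pushes the node onto the exact-size list free_[10]. Blocks smaller
// than kMinBlockSize are instead reported back to the caller as wasted
// bytes.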
1729
1730
1731Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
1732 ASSERT(0 < size_in_bytes);
1733 ASSERT(size_in_bytes <= kMaxBlockSize);
1734 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1735
1736 if (needs_rebuild_) RebuildSizeList();
1737 int index = size_in_bytes >> kPointerSizeLog2;
1738 // Check for a perfect fit.
1739 if (free_[index].head_node_ != NULL) {
1740 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1741 // If this was the last block of its size, remove the size.
1742 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
1743 available_ -= size_in_bytes;
1744 *wasted_bytes = 0;
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001745 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001746 return node;
1747 }
1748 // Search the size list for the best fit.
1749 int prev = finger_ < index ? finger_ : kHead;
1750 int cur = FindSize(index, &prev);
1751 ASSERT(index < cur);
1752 if (cur == kEnd) {
1753 // No large enough size in list.
1754 *wasted_bytes = 0;
1755 return Failure::RetryAfterGC(size_in_bytes, owner_);
1756 }
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001757 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001758 int rem = cur - index;
1759 int rem_bytes = rem << kPointerSizeLog2;
1760 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
kasper.lund7276f142008-07-30 08:49:36 +00001761 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001762 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1763 size_in_bytes);
1764 // Distinguish the cases prev < rem < cur and rem <= prev < cur
1765 // to avoid many redundant tests and calls to Insert/RemoveSize.
1766 if (prev < rem) {
1767 // Simple case: insert rem between prev and cur.
1768 finger_ = prev;
1769 free_[prev].next_size_ = rem;
1770 // If this was the last block of size cur, remove the size.
1771 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1772 free_[rem].next_size_ = free_[cur].next_size_;
1773 } else {
1774 free_[rem].next_size_ = cur;
1775 }
1776 // Add the remainder block.
1777 rem_node->set_size(rem_bytes);
1778 rem_node->set_next(free_[rem].head_node_);
1779 free_[rem].head_node_ = rem_node->address();
1780 } else {
1781 // If this was the last block of size cur, remove the size.
1782 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1783 finger_ = prev;
1784 free_[prev].next_size_ = free_[cur].next_size_;
1785 }
1786 if (rem_bytes < kMinBlockSize) {
1787 // Too-small remainder is wasted.
1788 rem_node->set_size(rem_bytes);
1789 available_ -= size_in_bytes + rem_bytes;
1790 *wasted_bytes = rem_bytes;
1791 return cur_node;
1792 }
1793 // Add the remainder block and, if needed, insert its size.
1794 rem_node->set_size(rem_bytes);
1795 rem_node->set_next(free_[rem].head_node_);
1796 free_[rem].head_node_ = rem_node->address();
1797 if (rem_node->next() == NULL) InsertSize(rem);
1798 }
1799 available_ -= size_in_bytes;
1800 *wasted_bytes = 0;
1801 return cur_node;
1802}
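
// Worked example of the best-fit path (hypothetical sizes, 32-bit build):
// requesting 24 bytes (index 6) when only a 64-byte block (index 16) is on
// the free lists gives rem = 16 - 6 = 10 and rem_bytes = 40. The first
// 24 bytes of the block are returned to the caller and the 40-byte remainder
// is re-inserted at free_[10]; had rem_bytes been below kMinBlockSize, it
// would have been reported through *wasted_bytes instead.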
1803
1804
kasper.lund7276f142008-07-30 08:49:36 +00001805#ifdef DEBUG
1806bool OldSpaceFreeList::Contains(FreeListNode* node) {
1807 for (int i = 0; i < kFreeListsLength; i++) {
1808 Address cur_addr = free_[i].head_node_;
1809 while (cur_addr != NULL) {
1810 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1811 if (cur_node == node) return true;
1812 cur_addr = cur_node->next();
1813 }
1814 }
1815 return false;
1816}
1817#endif
1818
1819
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001820FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
1821 : owner_(owner), object_size_(object_size) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001822 Reset();
1823}
1824
1825
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001826void FixedSizeFreeList::Reset() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001827 available_ = 0;
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001828 head_ = tail_ = NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001829}
1830
1831
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001832void FixedSizeFreeList::Free(Address start) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001833#ifdef DEBUG
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001834 MemoryAllocator::ZapBlock(start, object_size_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001835#endif
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001836 // We only use the freelists with mark-sweep.
1837 ASSERT(!MarkCompactCollector::IsCompacting());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001838 FreeListNode* node = FreeListNode::FromAddress(start);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001839 node->set_size(object_size_);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001840 node->set_next(NULL);
1841 if (head_ == NULL) {
1842 tail_ = head_ = node->address();
1843 } else {
1844 FreeListNode::FromAddress(tail_)->set_next(node->address());
1845 tail_ = node->address();
1846 }
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001847 available_ += object_size_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001848}
1849
1850
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001851Object* FixedSizeFreeList::Allocate() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001852 if (head_ == NULL) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001853 return Failure::RetryAfterGC(object_size_, owner_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001854 }
1855
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001856 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001857 FreeListNode* node = FreeListNode::FromAddress(head_);
1858 head_ = node->next();
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001859 available_ -= object_size_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001860 return node;
1861}
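
// Minimal usage sketch (illustrative; 'space_id', 'kCellSize', 'a' and 'b'
// are hypothetical): the fixed-size list behaves as a FIFO queue, so blocks
// are reused in the order they were freed.
//
//   FixedSizeFreeList list(space_id, kCellSize);
//   list.Free(a);                     // 'a' becomes both head_ and tail_.
//   list.Free(b);                     // appended after 'a'.
//   Object* first = list.Allocate();  // returns the node at 'a'.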
1862
1863
1864// -----------------------------------------------------------------------------
1865// OldSpace implementation
1866
1867void OldSpace::PrepareForMarkCompact(bool will_compact) {
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001868 // Call prepare of the super class.
1869 PagedSpace::PrepareForMarkCompact(will_compact);
1870
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001871 if (will_compact) {
1872 // Reset relocation info. During a compacting collection, everything in
1873 // the space is considered 'available' and we will rediscover live data
1874 // and waste during the collection.
1875 MCResetRelocationInfo();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001876 ASSERT(Available() == Capacity());
1877 } else {
1878 // During a non-compacting collection, everything below the linear
1879 // allocation pointer is considered allocated (everything above is
1880 // available) and we will rediscover available and wasted bytes during
1881 // the collection.
1882 accounting_stats_.AllocateBytes(free_list_.available());
1883 accounting_stats_.FillWastedBytes(Waste());
1884 }
1885
kasper.lund7276f142008-07-30 08:49:36 +00001886 // Clear the free list before a full GC---it will be rebuilt afterward.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001887 free_list_.Reset();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001888}
1889
1890
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001891void OldSpace::MCCommitRelocationInfo() {
1892 // Update fast allocation info.
1893 allocation_info_.top = mc_forwarding_info_.top;
1894 allocation_info_.limit = mc_forwarding_info_.limit;
kasper.lund7276f142008-07-30 08:49:36 +00001895 ASSERT(allocation_info_.VerifyPagedAllocation());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001896
1897 // The space is compacted and we haven't yet built free lists or
1898 // wasted any space.
1899 ASSERT(Waste() == 0);
1900 ASSERT(AvailableFree() == 0);
1901
1902 // Build the free list for the space.
1903 int computed_size = 0;
1904 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
1905 while (it.has_next()) {
1906 Page* p = it.next();
1907 // Space below the relocation pointer is allocated.
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001908 computed_size +=
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001909 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001910 if (it.has_next()) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001911 // Free the space at the top of the page.
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001912 int extra_size =
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001913 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001914 if (extra_size > 0) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001915 int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
1916 extra_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001917 // The bytes we have just "freed" to add to the free list were
1918 // already accounted as available.
1919 accounting_stats_.WasteBytes(wasted_bytes);
1920 }
1921 }
1922 }
1923
1924 // Make sure the computed size - based on the used portion of the pages in
1925 // use - matches the size obtained while computing forwarding addresses.
1926 ASSERT(computed_size == Size());
1927}
1928
1929
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001930bool NewSpace::ReserveSpace(int bytes) {
1931 // We can't reliably unpack a partial snapshot that needs more new space
1932 // than the minimum NewSpace size.
1933 ASSERT(bytes <= InitialCapacity());
1934 Address limit = allocation_info_.limit;
1935 Address top = allocation_info_.top;
1936 return limit - top >= bytes;
1937}
1938
1939
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001940void PagedSpace::FreePages(Page* prev, Page* last) {
1941 if (last == AllocationTopPage()) {
1942 // Pages are already at the end of used pages.
1943 return;
1944 }
1945
1946 Page* first = NULL;
1947
1948 // Remove pages from the list.
1949 if (prev == NULL) {
1950 first = first_page_;
1951 first_page_ = last->next_page();
1952 } else {
1953 first = prev->next_page();
1954 MemoryAllocator::SetNextPage(prev, last->next_page());
1955 }
1956
1957 // Attach it after the last page.
1958 MemoryAllocator::SetNextPage(last_page_, first);
1959 last_page_ = last;
1960 MemoryAllocator::SetNextPage(last, NULL);
1961
1962 // Clean them up.
1963 do {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001964 first->InvalidateWatermark(true);
1965 first->SetAllocationWatermark(first->ObjectAreaStart());
1966 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
1967 first->SetRegionMarks(Page::kAllRegionsCleanMarks);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001968 first = first->next_page();
1969 } while (first != NULL);
1970
1971 // Order of pages in this space might no longer be consistent with
1972 // order of pages in chunks.
1973 page_list_is_chunk_ordered_ = false;
1974}
1975
1976
1977void PagedSpace::PrepareForMarkCompact(bool will_compact) {
1978 if (will_compact) {
1979 // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
1980 // to skip unused pages. Update flag value for all pages in space.
1981 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
1982 Page* last_in_use = AllocationTopPage();
1983 bool in_use = true;
1984
1985 while (all_pages_iterator.has_next()) {
1986 Page* p = all_pages_iterator.next();
1987 p->SetWasInUseBeforeMC(in_use);
1988 if (p == last_in_use) {
1989 // We passed a page containing allocation top. All consequent
1990 // pages are not used.
1991 in_use = false;
1992 }
1993 }
1994
1995 if (!page_list_is_chunk_ordered_) {
1996 Page* new_last_in_use = Page::FromAddress(NULL);
1997 MemoryAllocator::RelinkPageListInChunkOrder(this,
1998 &first_page_,
1999 &last_page_,
2000 &new_last_in_use);
2001 ASSERT(new_last_in_use->is_valid());
2002
2003 if (new_last_in_use != last_in_use) {
2004 // Current allocation top points to a page which is now in the middle
2005 // of the page list. We should move the allocation top forward to the new last
2006 // used page so various object iterators will continue to work properly.
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002007 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002008
2009 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2010 last_in_use->AllocationTop());
2011
2012 if (size_in_bytes > 0) {
2013 // There is still some space left on this page. Create a fake
2014 // object which will occupy all free space on this page.
2015 // Otherwise iterators would not be able to scan this page
2016 // correctly.
2017
2018 Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
2019 size_in_bytes);
2020 }
2021
2022 // New last in use page was in the middle of the list before
2023 // sorting, so it is full.
2024 SetTop(new_last_in_use->AllocationTop());
2025
2026 ASSERT(AllocationTopPage() == new_last_in_use);
2027 ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2028 }
2029
2030 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2031 while (pages_in_use_iterator.has_next()) {
2032 Page* p = pages_in_use_iterator.next();
2033 if (!p->WasInUseBeforeMC()) {
2034 // Empty page is in the middle of a sequence of used pages.
2035 // Create a fake object which will occupy all free space on this page.
2036 // Otherwise iterators would not be able to scan this page correctly.
2037 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2038 p->ObjectAreaStart());
2039
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002040 p->SetAllocationWatermark(p->ObjectAreaStart());
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002041 Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
2042 }
2043 }
2044
2045 page_list_is_chunk_ordered_ = true;
2046 }
2047 }
2048}
2049
2050
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002051bool PagedSpace::ReserveSpace(int bytes) {
2052 Address limit = allocation_info_.limit;
2053 Address top = allocation_info_.top;
2054 if (limit - top >= bytes) return true;
2055
2056 // There wasn't enough space in the current page. Let's put the rest
2057 // of the page on the free list and start a fresh page.
2058 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2059
2060 Page* reserved_page = TopPageOf(allocation_info_);
2061 int bytes_left_to_reserve = bytes;
2062 while (bytes_left_to_reserve > 0) {
2063 if (!reserved_page->next_page()->is_valid()) {
2064 if (Heap::OldGenerationAllocationLimitReached()) return false;
2065 Expand(reserved_page);
2066 }
2067 bytes_left_to_reserve -= Page::kPageSize;
2068 reserved_page = reserved_page->next_page();
2069 if (!reserved_page->is_valid()) return false;
2070 }
2071 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002072 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002073 SetAllocationInfo(&allocation_info_,
2074 TopPageOf(allocation_info_)->next_page());
2075 return true;
2076}
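
// Illustrative note (assuming Page::kPageSize is 8 KB, which is an
// assumption of this note rather than something stated here): reserving
// 10 KB frees the tail of the current page, then the loop above runs twice,
// expanding the space as needed, so at least two valid pages beyond the
// current allocation top exist before linear allocation restarts on the
// next page.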
2077
2078
2079// You have to call this last, since the implementation from PagedSpace
2080// doesn't know that memory was 'promised' to large object space.
2081bool LargeObjectSpace::ReserveSpace(int bytes) {
2082 return Heap::OldGenerationSpaceAvailable() >= bytes;
2083}
2084
2085
kasper.lund7276f142008-07-30 08:49:36 +00002086// Slow case for normal allocation. Try in order: (1) allocate in the next
2087// page in the space, (2) allocate off the space's free list, (3) expand the
2088// space, (4) fail.
2089HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2090 // Linear allocation in this space has failed. If there is another page
2091 // in the space, move to that page and allocate there. This allocation
2092 // should succeed (size_in_bytes should not be greater than a page's
2093 // object area size).
2094 Page* current_page = TopPageOf(allocation_info_);
2095 if (current_page->next_page()->is_valid()) {
2096 return AllocateInNextPage(current_page, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002097 }
kasper.lund7276f142008-07-30 08:49:36 +00002098
ager@chromium.org3811b432009-10-28 14:53:37 +00002099 // There is no next page in this space. Try free list allocation unless that
2100 // is currently forbidden.
2101 if (!Heap::linear_allocation()) {
2102 int wasted_bytes;
2103 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
2104 accounting_stats_.WasteBytes(wasted_bytes);
2105 if (!result->IsFailure()) {
2106 accounting_stats_.AllocateBytes(size_in_bytes);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002107
2108 HeapObject* obj = HeapObject::cast(result);
2109 Page* p = Page::FromAddress(obj->address());
2110
2111 if (obj->address() >= p->AllocationWatermark()) {
2112 // There should be no hole between the allocation watermark
2113 // and allocated object address.
2114 // Memory above the allocation watermark was not swept and
2115 // might contain garbage pointers to new space.
2116 ASSERT(obj->address() == p->AllocationWatermark());
2117 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2118 }
2119
2120 return obj;
ager@chromium.org3811b432009-10-28 14:53:37 +00002121 }
kasper.lund7276f142008-07-30 08:49:36 +00002122 }
2123
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002124 // Free list allocation failed and there is no next page. Fail if we have
2125 // hit the old generation size limit that should cause a garbage
2126 // collection.
2127 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2128 return NULL;
2129 }
2130
2131 // Try to expand the space and allocate in the new next page.
kasper.lund7276f142008-07-30 08:49:36 +00002132 ASSERT(!current_page->next_page()->is_valid());
2133 if (Expand(current_page)) {
2134 return AllocateInNextPage(current_page, size_in_bytes);
2135 }
2136
2137 // Finally, fail.
2138 return NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002139}
2140
2141
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002142void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002143 current_page->SetAllocationWatermark(allocation_info_.top);
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002144 int free_size =
2145 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
kasper.lund7276f142008-07-30 08:49:36 +00002146 if (free_size > 0) {
2147 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2148 accounting_stats_.WasteBytes(wasted_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002149 }
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002150}
2151
2152
2153void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002154 current_page->SetAllocationWatermark(allocation_info_.top);
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002155 int free_size =
2156 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2157 // In the fixed space free list all the free list items have the right size.
2158 // We use up the rest of the page while preserving this invariant.
2159 while (free_size >= object_size_in_bytes_) {
2160 free_list_.Free(allocation_info_.top);
2161 allocation_info_.top += object_size_in_bytes_;
2162 free_size -= object_size_in_bytes_;
2163 accounting_stats_.WasteBytes(object_size_in_bytes_);
2164 }
2165}
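
// Illustrative example (hypothetical numbers): with object_size_in_bytes_
// equal to 8 and a 96-byte tail left on the current page, the loop above
// produces twelve 8-byte free-list nodes, each of which is also charged to
// the accounting stats as wasted bytes, preserving the invariant that every
// fixed-space free block has exactly the object size.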
2166
2167
2168// Add the block at the top of the page to the space's free list, set the
2169// allocation info to the next page (assumed to exist), and allocate
2170// linearly there.
2171HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2172 int size_in_bytes) {
2173 ASSERT(current_page->next_page()->is_valid());
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002174 Page* next_page = current_page->next_page();
2175 next_page->ClearGCFields();
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002176 PutRestOfCurrentPageOnFreeList(current_page);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002177 SetAllocationInfo(&allocation_info_, next_page);
kasper.lund7276f142008-07-30 08:49:36 +00002178 return AllocateLinearly(&allocation_info_, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002179}
2180
2181
2182#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002183struct CommentStatistic {
2184 const char* comment;
2185 int size;
2186 int count;
2187 void Clear() {
2188 comment = NULL;
2189 size = 0;
2190 count = 0;
2191 }
2192};
2193
2194
2195// Must be small, since a linear scan is used for lookup.
2196const int kMaxComments = 64;
2197static CommentStatistic comments_statistics[kMaxComments+1];
2198
2199
2200void PagedSpace::ReportCodeStatistics() {
2201 ReportCodeKindStatistics();
2202 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2203 "count (average)\"):\n");
2204 for (int i = 0; i <= kMaxComments; i++) {
2205 const CommentStatistic& cs = comments_statistics[i];
2206 if (cs.size > 0) {
2207 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2208 cs.size/cs.count);
2209 }
2210 }
2211 PrintF("\n");
2212}
2213
2214
2215void PagedSpace::ResetCodeStatistics() {
2216 ClearCodeKindStatistics();
2217 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
2218 comments_statistics[kMaxComments].comment = "Unknown";
2219 comments_statistics[kMaxComments].size = 0;
2220 comments_statistics[kMaxComments].count = 0;
2221}
2222
2223
2224// Adds a comment to the 'comments_statistics' table. Performance is OK as
2225// long as 'kMaxComments' is small.
2226static void EnterComment(const char* comment, int delta) {
2227 // Do not count empty comments
2228 if (delta <= 0) return;
2229 CommentStatistic* cs = &comments_statistics[kMaxComments];
2230 // Search for a free or matching entry in 'comments_statistics': 'cs'
2231 // points to result.
2232 for (int i = 0; i < kMaxComments; i++) {
2233 if (comments_statistics[i].comment == NULL) {
2234 cs = &comments_statistics[i];
2235 cs->comment = comment;
2236 break;
2237 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2238 cs = &comments_statistics[i];
2239 break;
2240 }
2241 }
2242 // Update entry for 'comment'
2243 cs->size += delta;
2244 cs->count += 1;
2245}
2246
2247
2248// Called for each nested comment start (start marked with '[ xxx', end marked
2249// with ']'). RelocIterator 'it' must point to a comment reloc info.
2250static void CollectCommentStatistics(RelocIterator* it) {
2251 ASSERT(!it->done());
ager@chromium.org236ad962008-09-25 09:45:57 +00002252 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002253 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2254 if (tmp[0] != '[') {
2255 // Not a nested comment; skip
2256 return;
2257 }
2258
2259 // Search for end of nested comment or a new nested comment
2260 const char* const comment_txt =
2261 reinterpret_cast<const char*>(it->rinfo()->data());
2262 const byte* prev_pc = it->rinfo()->pc();
2263 int flat_delta = 0;
2264 it->next();
2265 while (true) {
2266 // All nested comments must be terminated properly, and therefore exit
2267 // from loop.
2268 ASSERT(!it->done());
ager@chromium.org236ad962008-09-25 09:45:57 +00002269 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002270 const char* const txt =
2271 reinterpret_cast<const char*>(it->rinfo()->data());
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002272 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002273 if (txt[0] == ']') break; // End of nested comment
2274 // A new comment
2275 CollectCommentStatistics(it);
2276 // Skip code that was covered with previous comment
2277 prev_pc = it->rinfo()->pc();
2278 }
2279 it->next();
2280 }
2281 EnterComment(comment_txt, flat_delta);
2282}
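
// Illustrative walk-through (the comment strings are hypothetical): for
// assembler comments emitted as
//   "[ caller stub" ...code A... "[ inline cache" ...code B... "]"
//   ...code C... "]"
// the recursive call attributes the bytes of code B to "[ inline cache",
// while the EnterComment() call above records only code A plus code C
// against "[ caller stub", because prev_pc is advanced past each nested
// comment.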
2283
2284
2285// Collects code size statistics:
2286// - by code kind
2287// - by code comment
2288void PagedSpace::CollectCodeStatistics() {
2289 HeapObjectIterator obj_it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002290 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002291 if (obj->IsCode()) {
2292 Code* code = Code::cast(obj);
2293 code_kind_statistics[code->kind()] += code->Size();
2294 RelocIterator it(code);
2295 int delta = 0;
2296 const byte* prev_pc = code->instruction_start();
2297 while (!it.done()) {
ager@chromium.org236ad962008-09-25 09:45:57 +00002298 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002299 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002300 CollectCommentStatistics(&it);
2301 prev_pc = it.rinfo()->pc();
2302 }
2303 it.next();
2304 }
2305
2306 ASSERT(code->instruction_start() <= prev_pc &&
2307 prev_pc <= code->relocation_start());
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002308 delta += static_cast<int>(code->relocation_start() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002309 EnterComment("NoComment", delta);
2310 }
2311 }
2312}
2313
2314
2315void OldSpace::ReportStatistics() {
2316 int pct = Available() * 100 / Capacity();
2317 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2318 Capacity(), Waste(), Available(), pct);
2319
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002320 ClearHistograms();
2321 HeapObjectIterator obj_it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002322 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2323 CollectHistogramInfo(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002324 ReportHistogram(true);
2325}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002326#endif
2327
2328// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002329// FixedSpace implementation
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002330
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002331void FixedSpace::PrepareForMarkCompact(bool will_compact) {
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002332 // Call prepare of the super class.
2333 PagedSpace::PrepareForMarkCompact(will_compact);
2334
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002335 if (will_compact) {
2336 // Reset relocation info.
2337 MCResetRelocationInfo();
2338
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002339 // During a compacting collection, everything in the space is considered
2340 // 'available' (set by the call to MCResetRelocationInfo) and we will
2341 // rediscover live and wasted bytes during the collection.
2342 ASSERT(Available() == Capacity());
2343 } else {
2344 // During a non-compacting collection, everything below the linear
2345 // allocation pointer except wasted top-of-page blocks is considered
2346 // allocated and we will rediscover available bytes during the
2347 // collection.
2348 accounting_stats_.AllocateBytes(free_list_.available());
2349 }
2350
kasper.lund7276f142008-07-30 08:49:36 +00002351 // Clear the free list before a full GC---it will be rebuilt afterward.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002352 free_list_.Reset();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002353}
2354
2355
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002356void FixedSpace::MCCommitRelocationInfo() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002357 // Update fast allocation info.
2358 allocation_info_.top = mc_forwarding_info_.top;
2359 allocation_info_.limit = mc_forwarding_info_.limit;
kasper.lund7276f142008-07-30 08:49:36 +00002360 ASSERT(allocation_info_.VerifyPagedAllocation());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002361
2362 // The space is compacted and we haven't yet wasted any space.
2363 ASSERT(Waste() == 0);
2364
2365 // Update allocation_top of each page in use and compute waste.
2366 int computed_size = 0;
2367 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2368 while (it.has_next()) {
2369 Page* page = it.next();
2370 Address page_top = page->AllocationTop();
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002371 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002372 if (it.has_next()) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002373 accounting_stats_.WasteBytes(
2374 static_cast<int>(page->ObjectAreaEnd() - page_top));
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002375 page->SetAllocationWatermark(page_top);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002376 }
2377 }
2378
2379 // Make sure the computed size - based on the used portion of the
2380 // pages in use - matches the size we adjust during allocation.
2381 ASSERT(computed_size == Size());
2382}
2383
2384
kasper.lund7276f142008-07-30 08:49:36 +00002385// Slow case for normal allocation. Try in order: (1) allocate in the next
2386// page in the space, (2) allocate off the space's free list, (3) expand the
2387// space, (4) fail.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002388HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2389 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00002390 // Linear allocation in this space has failed. If there is another page
2391 // in the space, move to that page and allocate there. This allocation
2392 // should succeed.
2393 Page* current_page = TopPageOf(allocation_info_);
2394 if (current_page->next_page()->is_valid()) {
2395 return AllocateInNextPage(current_page, size_in_bytes);
2396 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002397
ager@chromium.org3811b432009-10-28 14:53:37 +00002398 // There is no next page in this space. Try free list allocation unless
2399 // that is currently forbidden. The fixed space free list implicitly assumes
2400 // that all free blocks are of the fixed size.
2401 if (!Heap::linear_allocation()) {
kasper.lund7276f142008-07-30 08:49:36 +00002402 Object* result = free_list_.Allocate();
2403 if (!result->IsFailure()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002404 accounting_stats_.AllocateBytes(size_in_bytes);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002405 HeapObject* obj = HeapObject::cast(result);
2406 Page* p = Page::FromAddress(obj->address());
2407
2408 if (obj->address() >= p->AllocationWatermark()) {
2409 // There should be no hole between the allocation watermark
2410 // and allocated object address.
2411 // Memory above the allocation watermark was not swept and
2412 // might contain garbage pointers to new space.
2413 ASSERT(obj->address() == p->AllocationWatermark());
2414 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2415 }
2416
2417 return obj;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002418 }
2419 }
kasper.lund7276f142008-07-30 08:49:36 +00002420
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002421 // Free list allocation failed and there is no next page. Fail if we have
2422 // hit the old generation size limit that should cause a garbage
2423 // collection.
2424 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2425 return NULL;
2426 }
2427
2428 // Try to expand the space and allocate in the new next page.
kasper.lund7276f142008-07-30 08:49:36 +00002429 ASSERT(!current_page->next_page()->is_valid());
2430 if (Expand(current_page)) {
2431 return AllocateInNextPage(current_page, size_in_bytes);
2432 }
2433
2434 // Finally, fail.
2435 return NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002436}
2437
2438
kasper.lund7276f142008-07-30 08:49:36 +00002439// Move to the next page (there is assumed to be one) and allocate there.
2440// The top of page block is always wasted, because it is too small to hold a
2441// map.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002442HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2443 int size_in_bytes) {
kasper.lund7276f142008-07-30 08:49:36 +00002444 ASSERT(current_page->next_page()->is_valid());
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002445 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002446 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002447 Page* next_page = current_page->next_page();
2448 next_page->ClearGCFields();
2449 current_page->SetAllocationWatermark(allocation_info_.top);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002450 accounting_stats_.WasteBytes(page_extra_);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002451 SetAllocationInfo(&allocation_info_, next_page);
kasper.lund7276f142008-07-30 08:49:36 +00002452 return AllocateLinearly(&allocation_info_, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002453}
2454
2455
2456#ifdef DEBUG
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002457void FixedSpace::ReportStatistics() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002458 int pct = Available() * 100 / Capacity();
2459 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2460 Capacity(), Waste(), Available(), pct);
2461
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002462 ClearHistograms();
2463 HeapObjectIterator obj_it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002464 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2465 CollectHistogramInfo(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002466 ReportHistogram(false);
2467}
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002468#endif
2469
2470
2471// -----------------------------------------------------------------------------
2472// MapSpace implementation
2473
2474void MapSpace::PrepareForMarkCompact(bool will_compact) {
2475 // Call prepare of the super class.
2476 FixedSpace::PrepareForMarkCompact(will_compact);
2477
2478 if (will_compact) {
2479 // Initialize map index entry.
2480 int page_count = 0;
2481 PageIterator it(this, PageIterator::ALL_PAGES);
2482 while (it.has_next()) {
2483 ASSERT_MAP_PAGE_INDEX(page_count);
2484
2485 Page* p = it.next();
2486 ASSERT(p->mc_page_index == page_count);
2487
2488 page_addresses_[page_count++] = p->address();
2489 }
2490 }
2491}
2492
2493
2494#ifdef DEBUG
2495void MapSpace::VerifyObject(HeapObject* object) {
2496 // The object should be a map or a free-list node.
2497 ASSERT(object->IsMap() || object->IsByteArray());
2498}
2499#endif
2500
2501
2502// -----------------------------------------------------------------------------
2503// CellSpace (global property cell space) implementation
2504
2505#ifdef DEBUG
2506void CellSpace::VerifyObject(HeapObject* object) {
2507 // The object should be a global object property cell or a free-list node.
2508 ASSERT(object->IsJSGlobalPropertyCell() ||
2509 object->map() == Heap::two_pointer_filler_map());
2510}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002511#endif
2512
2513
2514// -----------------------------------------------------------------------------
2515// LargeObjectIterator
2516
2517LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2518 current_ = space->first_chunk_;
2519 size_func_ = NULL;
2520}
2521
2522
2523LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2524 HeapObjectCallback size_func) {
2525 current_ = space->first_chunk_;
2526 size_func_ = size_func;
2527}
2528
2529
2530HeapObject* LargeObjectIterator::next() {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002531 if (current_ == NULL) return NULL;
2532
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002533 HeapObject* object = current_->GetObject();
2534 current_ = current_->next();
2535 return object;
2536}
2537
2538
2539// -----------------------------------------------------------------------------
2540// LargeObjectChunk
2541
2542LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
kasper.lund7276f142008-07-30 08:49:36 +00002543 size_t* chunk_size,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002544 Executability executable) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002545 size_t requested = ChunkSizeFor(size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00002546 void* mem = MemoryAllocator::AllocateRawMemory(requested,
2547 chunk_size,
2548 executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002549 if (mem == NULL) return NULL;
2550 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
2551 if (*chunk_size < requested) {
2552 MemoryAllocator::FreeRawMemory(mem, *chunk_size);
2553 LOG(DeleteEvent("LargeObjectChunk", mem));
2554 return NULL;
2555 }
2556 return reinterpret_cast<LargeObjectChunk*>(mem);
2557}
2558
2559
2560int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002561 int os_alignment = static_cast<int>(OS::AllocateAlignment());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002562 if (os_alignment < Page::kPageSize)
2563 size_in_bytes += (Page::kPageSize - os_alignment);
2564 return size_in_bytes + Page::kObjectStartOffset;
2565}
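
// Worked example (assuming a 4 KB OS allocation alignment and an 8 KB
// Page::kPageSize; both are assumptions of this note): a 100000-byte object
// yields a chunk request of
//   100000 + (8192 - 4096) + Page::kObjectStartOffset
// where the middle term leaves room to place a page-aligned Page inside the
// chunk and kObjectStartOffset accounts for the page header.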
2566
2567// -----------------------------------------------------------------------------
2568// LargeObjectSpace
2569
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002570LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
2571 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
kasper.lund7276f142008-07-30 08:49:36 +00002572 first_chunk_(NULL),
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002573 size_(0),
2574 page_count_(0) {}
2575
2576
2577bool LargeObjectSpace::Setup() {
2578 first_chunk_ = NULL;
2579 size_ = 0;
2580 page_count_ = 0;
2581 return true;
2582}
2583
2584
2585void LargeObjectSpace::TearDown() {
2586 while (first_chunk_ != NULL) {
2587 LargeObjectChunk* chunk = first_chunk_;
2588 first_chunk_ = first_chunk_->next();
2589 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
2590 MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
2591 }
2592
2593 size_ = 0;
2594 page_count_ = 0;
2595}
2596
2597
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00002598#ifdef ENABLE_HEAP_PROTECTION
2599
2600void LargeObjectSpace::Protect() {
2601 LargeObjectChunk* chunk = first_chunk_;
2602 while (chunk != NULL) {
2603 MemoryAllocator::Protect(chunk->address(), chunk->size());
2604 chunk = chunk->next();
2605 }
2606}
2607
2608
2609void LargeObjectSpace::Unprotect() {
2610 LargeObjectChunk* chunk = first_chunk_;
2611 while (chunk != NULL) {
2612 bool is_code = chunk->GetObject()->IsCode();
2613 MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
2614 is_code ? EXECUTABLE : NOT_EXECUTABLE);
2615 chunk = chunk->next();
2616 }
2617}
2618
2619#endif
2620
2621
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002622Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002623 int object_size,
2624 Executability executable) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002625 ASSERT(0 < object_size && object_size <= requested_size);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002626
2627 // Check if we want to force a GC before growing the old space further.
2628 // If so, fail the allocation.
2629 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2630 return Failure::RetryAfterGC(requested_size, identity());
2631 }
2632
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002633 size_t chunk_size;
2634 LargeObjectChunk* chunk =
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002635 LargeObjectChunk::New(requested_size, &chunk_size, executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002636 if (chunk == NULL) {
kasper.lund7276f142008-07-30 08:49:36 +00002637 return Failure::RetryAfterGC(requested_size, identity());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002638 }
2639
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002640 size_ += static_cast<int>(chunk_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002641 page_count_++;
2642 chunk->set_next(first_chunk_);
2643 chunk->set_size(chunk_size);
2644 first_chunk_ = chunk;
2645
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002646 // Initialize page header.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002647 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2648 Address object_address = page->ObjectAreaStart();
2649 // Clear the low order bit of the second word in the page to flag it as a
2650 // large object page. If the chunk_size happened to be written there, its
2651 // low order bit should already be clear.
2652 ASSERT((chunk_size & 0x1) == 0);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002653 page->SetIsLargeObjectPage(true);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002654 page->SetRegionMarks(Page::kAllRegionsCleanMarks);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002655 return HeapObject::FromAddress(object_address);
2656}
2657
2658
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002659Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002660 ASSERT(0 < size_in_bytes);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002661 return AllocateRawInternal(size_in_bytes,
2662 size_in_bytes,
2663 EXECUTABLE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002664}
2665
2666
2667Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002668 ASSERT(0 < size_in_bytes);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002669 return AllocateRawInternal(size_in_bytes,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002670 size_in_bytes,
2671 NOT_EXECUTABLE);
2672}
2673
2674
2675Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2676 ASSERT(0 < size_in_bytes);
2677 return AllocateRawInternal(size_in_bytes,
2678 size_in_bytes,
2679 NOT_EXECUTABLE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002680}
2681
2682
2683// GC support
2684Object* LargeObjectSpace::FindObject(Address a) {
2685 for (LargeObjectChunk* chunk = first_chunk_;
2686 chunk != NULL;
2687 chunk = chunk->next()) {
2688 Address chunk_address = chunk->address();
2689 if (chunk_address <= a && a < chunk_address + chunk->size()) {
2690 return chunk->GetObject();
2691 }
2692 }
2693 return Failure::Exception();
2694}
2695
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002696void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002697 LargeObjectIterator it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002698 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002699 // We only have code, sequential strings, or fixed arrays in large
2700 // object space, and only fixed arrays can possibly contain pointers to
2701 // the young generation.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002702 if (object->IsFixedArray()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002703 Page* page = Page::FromAddress(object->address());
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002704 uint32_t marks = page->GetRegionMarks();
2705 uint32_t newmarks = Page::kAllRegionsCleanMarks;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002706
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002707 if (marks != Page::kAllRegionsCleanMarks) {
2708 // For a large page a single dirty mark corresponds to several
2709 // regions (modulo 32). So we treat a large page as a sequence of
2710 // normal pages of size Page::kPageSize having the same dirty marks
2711 // and subsequently iterate dirty regions on each of these pages.
        Address start = object->address();
        Address end = page->ObjectAreaEnd();
        Address object_end = start + object->Size();

        // Iterate regions of the first normal page covering the object.
        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
        newmarks |=
            Heap::IterateDirtyRegions(marks >> first_region_number,
                                      start,
                                      end,
                                      &Heap::IteratePointersInDirtyRegion,
                                      copy_object) << first_region_number;

        start = end;
        end = start + Page::kPageSize;
        while (end <= object_end) {
          // Iterate next 32 regions.
          newmarks |=
              Heap::IterateDirtyRegions(marks,
                                        start,
                                        end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object);
          start = end;
          end = start + Page::kPageSize;
        }

        if (start != object_end) {
          // Iterate the last piece of the object, which is less than
          // Page::kPageSize.
          newmarks |=
              Heap::IterateDirtyRegions(marks,
                                        start,
                                        object_end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object);
        }

        page->SetRegionMarks(newmarks);
      }
    }
  }
}


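// Editorial summary (not part of the original source): sweep the large
// object space after a mark phase. Marked objects are unmarked and kept;
// unmarked chunks are unlinked from the chunk list and their raw memory is
// returned to the MemoryAllocator.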
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      MarkCompactCollector::ReportDeleteIfNeeded(object);
      size_ -= static_cast<int>(chunk_size);
      page_count_--;
      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}


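// Editorial note (not part of the original source): new-space addresses are
// rejected up front, presumably because Page::FromAddress() only yields a
// page header with a meaningful IsLargeObjectPage() flag for addresses
// outside the young generation.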
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  if (Heap::new_space()->Contains(address)) {
    return false;
  }
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}


#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather than using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRegionDirty.
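      // Editorial note: the loop below checks the dirty-marks invariant
      // directly: for every element that points into new space, the region
      // covering that element's slot must be marked dirty on this page.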
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(Heap::Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (Heap::InNewSpace(element_object)) {
            Address array_addr = object->address();
            Address element_addr = array_addr + FixedArray::kHeaderSize +
                j * kPointerSize;

            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
          }
        }
      }
    }
  }
}


void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    obj->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(" number of objects %d\n", num_objects);
  if (num_objects > 0) ReportHistogram(false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal