// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())


// ----------------------------------------------------------------------------
// HeapObjectIterator

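// HeapObjectIterator walks the live objects of a paged space page by page:
// Initialize() records the end page and uses each page's allocation top as
// the per-page limit, and FromNextPage() advances to the object area of the
// following page once the current one is exhausted.  A typical loop,
// assuming the inline next() accessor declared for this iterator in
// spaces.h, is sketched below:
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... process obj ...
//   }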
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


HeapObject* HeapObjectIterator::FromNextPage() {
  if (cur_addr_ == end_addr_) return NULL;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  if (cur_addr_ == end_addr_) return NULL;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return FromCurrentPage();
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}


// -----------------------------------------------------------------------------
// Page

#ifdef DEBUG
Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif

// -----------------------------------------------------------------------------
// CodeRange

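// CodeRange manages one reserved virtual memory region dedicated to generated
// code.  Keeping all code inside a single reservation guarantees that the
// distance between any two addresses in the range fits in a signed 32-bit
// offset (see the comment in CompareFreeBlockAddress below).  The region is
// handed out as FreeBlocks: allocation_list_ holds the blocks still available
// for allocation, while free_list_ collects blocks returned through
// FreeRawMemory() until they are merged back in GetNextAllocationBlock().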
List<CodeRange::FreeBlock> CodeRange::free_list_(0);
List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
int CodeRange::current_allocation_block_index_ = 0;
VirtualMemory* CodeRange::code_range_ = NULL;


bool CodeRange::Setup(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(NewEvent("CodeRange", code_range_->address(), requested));
  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


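// Finds an allocation block with at least 'requested' bytes and leaves
// current_allocation_block_index_ pointing at it.  The search has two phases:
// first the remaining blocks on allocation_list_ are scanned; if none is
// large enough, the free list and the allocation list are merged, sorted by
// address, coalesced, and scanned again.  If that also fails, the code range
// is full or too fragmented and we report out of memory.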
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}


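// Commits memory from the current allocation block.  The request is rounded
// up to a multiple of Page::kPageSize, and a remainder smaller than one page
// is absorbed into the allocation so no useless sliver is left behind.  On
// commit failure *allocated is set to 0 and NULL is returned.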
void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//
int MemoryAllocator::capacity_ = 0;
int MemoryAllocator::size_ = 0;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


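// Chunk ids are recycled through a simple stack kept in free_chunk_ids_,
// with top_ counting the ids currently available: Push() returns an id to
// the stack when a chunk is deleted, and Pop() hands one out when a new
// chunk is registered.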
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(int capacity) {
  capacity_ = RoundUp(capacity, Page::kPageSize);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<int>(requested) > capacity_) return NULL;
  void* mem;
  if (executable == EXECUTABLE && CodeRange::exists()) {
    mem = CodeRange::AllocateRawMemory(requested, allocated);
  } else {
    mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
  if (CodeRange::contains(static_cast<Address>(mem))) {
    CodeRange::FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  ASSERT(size_ >= 0);
}


void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}


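// Allocates a fresh chunk of raw memory for 'owner', clamped to the
// allocator's remaining capacity, registers it under a chunk id and returns
// the first of the pages carved out of it.  On failure an invalid page
// (Page::FromAddress(NULL)) is returned.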
Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee the desired number of pages can be
  // allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size);
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  Counters::memory_allocated.Decrement(static_cast<int>(size));
  return true;
}


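// In debug builds, freshly committed and freed blocks are filled with
// kZapValue one pointer at a time, so stale references into such memory are
// easy to recognize in a debugger.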
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->SetIsLargeObjectPage(false);
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


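// Releases pages starting at 'p'.  Memory is returned to the OS only in
// whole chunks, so if 'p' is not the first page of its chunk that chunk is
// kept: its last page becomes the end of the page list and freeing starts
// with the following chunk.  The return value is 'p' when its chunk
// survived, or an invalid page if every chunk from 'p' onwards was freed.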
Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'.
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'p'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // Set the next_page of last_page to NULL.
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::FreeAllPages(PagedSpace* space) {
  for (int i = 0, length = chunks_.length(); i < length; i++) {
    if (chunks_[i].owner() == space) {
      DeleteChunk(i);
    }
  }
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    FreeRawMemory(c.address(), c.size());
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
                                                 Page** first_page,
                                                 Page** last_page,
                                                 Page** last_page_in_use) {
  Page* first = NULL;
  Page* last = NULL;

  for (int i = 0, length = chunks_.length(); i < length; i++) {
    ChunkInfo& chunk = chunks_[i];

    if (chunk.owner() == space) {
      if (first == NULL) {
        Address low = RoundUp(chunk.address(), Page::kPageSize);
        first = Page::FromAddress(low);
      }
      last = RelinkPagesInChunk(i,
                                chunk.address(),
                                chunk.size(),
                                last,
                                last_page_in_use);
    }
  }

  if (first_page != NULL) {
    *first_page = first;
  }

  if (last_page != NULL) {
    *last_page = last;
  }
}


Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}



// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(int max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
                              max_capacity_ / Page::kObjectAreaSize);
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially initialize remembered sets in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->ClearRSet();
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  MemoryAllocator::FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::ClearRSet() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->ClearRSet();
  }
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


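// Computes the offset used when encoding forwarding addresses during
// mark-compact: pages are numbered consecutively in mc_page_index, and the
// result is index * Page::kPageSize plus the offset of 'addr' within its
// page.  For example, an address 64 bytes into the page with mc_page_index 2
// maps to 2 * Page::kPageSize + 64.  An address exactly on a page boundary
// is attributed to the preceding page.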
int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function treats an address at the beginning of a page as
  // belonging to that page, while MCSpaceOffsetForAddress treats it as
  // belonging to the previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to the previous page.
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->mc_relocation_top = mc_forwarding_info_.top;
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear the remembered set of the new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->ClearRSet();
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


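// Gives pages that lie beyond the current allocation top back to the memory
// allocator.  Since memory can only be returned in whole chunks, some of
// those pages may survive; the second loop below recounts them and updates
// last_page_ accordingly.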
void PagedSpace::Shrink() {
  if (!page_list_is_chunk_ordered_) {
    // We cannot shrink the space if the page list is not chunk-ordered
    // (see the comment for class MemoryAllocator for the definition).
    return;
  }

  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      // Unless this is the last page in the space containing allocated
      // objects, the allocation top should be at a constant offset from the
      // object area end.
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      } else {
        ASSERT(top == PageAllocationLimit(current_page));
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap and
        // have their remembered set bits set if required as determined
        // by the visitor.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


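// The committed capacity of the two semispaces is always kept equal, so
// growing or shrinking the new space means growing or shrinking both of
// them.  If the second semispace cannot follow, the first one is rolled
// back, and a failure to roll back is treated as fatal.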
void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * Size());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
    return false;
  }
  committed_ = true;
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
    return false;
  }
  committed_ = false;
  return true;
}


// -----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  initial_capacity_ = initial_capacity;
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;
  committed_ = false;

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;

  return Commit();
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  int maximum_extra = maximum_capacity_ - capacity_;
  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
                  maximum_extra);
  if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
    return false;
  }
  capacity_ += extra;
  return true;
}


bool SemiSpace::GrowTo(int new_capacity) {
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  size_t delta = new_capacity - capacity_;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  size_t delta = capacity_ - new_capacity;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


1419#ifdef DEBUG
1420// A static array of histogram info for each type.
1421static HistogramInfo heap_histograms[LAST_TYPE+1];
1422static JSObject::SpillInformation js_spill_information;
1423
1424// heap_histograms is shared, always clear it before using it.
1425static void ClearHistograms() {
1426 // We reset the name each time, though it hasn't changed.
1427#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
1428 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1429#undef DEF_TYPE_NAME
1430
1431#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
1432 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1433#undef CLEAR_HISTOGRAM
1434
1435 js_spill_information.Clear();
1436}
1437
1438
1439static int code_kind_statistics[Code::NUMBER_OF_KINDS];
1440
1441
1442static void ClearCodeKindStatistics() {
1443 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1444 code_kind_statistics[i] = 0;
1445 }
1446}
1447
1448
1449static void ReportCodeKindStatistics() {
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001450 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001451
1452#define CASE(name) \
1453 case Code::name: table[Code::name] = #name; \
1454 break
1455
1456 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1457 switch (static_cast<Code::Kind>(i)) {
1458 CASE(FUNCTION);
1459 CASE(STUB);
1460 CASE(BUILTIN);
1461 CASE(LOAD_IC);
1462 CASE(KEYED_LOAD_IC);
1463 CASE(STORE_IC);
1464 CASE(KEYED_STORE_IC);
1465 CASE(CALL_IC);
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001466 CASE(BINARY_OP_IC);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001467 }
1468 }
1469
1470#undef CASE
1471
1472 PrintF("\n Code kind histograms: \n");
1473 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1474 if (code_kind_statistics[i] > 0) {
1475 PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
1476 }
1477 }
1478 PrintF("\n");
1479}
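// For reference, each CASE(name) arm above expands via the preprocessor to a
// statement that records the kind's printable name, e.g. CASE(STUB) becomes
//   case Code::STUB: table[Code::STUB] = "STUB"; break;
// Kinds not listed in the switch simply keep their NULL table entry.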
1480
1481
1482static int CollectHistogramInfo(HeapObject* obj) {
1483 InstanceType type = obj->map()->instance_type();
1484 ASSERT(0 <= type && type <= LAST_TYPE);
1485 ASSERT(heap_histograms[type].name() != NULL);
1486 heap_histograms[type].increment_number(1);
1487 heap_histograms[type].increment_bytes(obj->Size());
1488
1489 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1490 JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
1491 }
1492
1493 return obj->Size();
1494}
1495
1496
1497static void ReportHistogram(bool print_spill) {
1498 PrintF("\n Object Histogram:\n");
1499 for (int i = 0; i <= LAST_TYPE; i++) {
1500 if (heap_histograms[i].number() > 0) {
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001501 PrintF(" %-34s%10d (%10d bytes)\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001502 heap_histograms[i].name(),
1503 heap_histograms[i].number(),
1504 heap_histograms[i].bytes());
1505 }
1506 }
1507 PrintF("\n");
1508
1509 // Summarize string types.
1510 int string_number = 0;
1511 int string_bytes = 0;
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001512#define INCREMENT(type, size, name, camel_name) \
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001513 string_number += heap_histograms[type].number(); \
1514 string_bytes += heap_histograms[type].bytes();
1515 STRING_TYPE_LIST(INCREMENT)
1516#undef INCREMENT
1517 if (string_number > 0) {
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001518 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001519 string_bytes);
1520 }
1521
1522 if (FLAG_collect_heap_spill_statistics && print_spill) {
1523 js_spill_information.Print();
1524 }
1525}
1526#endif // DEBUG
1527
1528
1529// Support for statistics gathering for --heap-stats and --log-gc.
1530#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1531void NewSpace::ClearHistograms() {
1532 for (int i = 0; i <= LAST_TYPE; i++) {
1533 allocated_histogram_[i].clear();
1534 promoted_histogram_[i].clear();
1535 }
1536}
1537
1538// Because the copying collector does not touch garbage objects, we iterate
1539// the new space before a collection to get a histogram of allocated objects.
1540// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1541// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1542// flag is set.
1543void NewSpace::CollectStatistics() {
1544 ClearHistograms();
1545 SemiSpaceIterator it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001546 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1547 RecordAllocation(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001548}
1549
1550
1551#ifdef ENABLE_LOGGING_AND_PROFILING
1552static void DoReportStatistics(HistogramInfo* info, const char* description) {
1553 LOG(HeapSampleBeginEvent("NewSpace", description));
1554 // Lump all the string types together.
1555 int string_number = 0;
1556 int string_bytes = 0;
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001557#define INCREMENT(type, size, name, camel_name) \
1558 string_number += info[type].number(); \
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001559 string_bytes += info[type].bytes();
1560 STRING_TYPE_LIST(INCREMENT)
1561#undef INCREMENT
1562 if (string_number > 0) {
1563 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1564 }
1565
1566 // Then do the other types.
1567 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1568 if (info[i].number() > 0) {
1569 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
1570 info[i].bytes()));
1571 }
1572 }
1573 LOG(HeapSampleEndEvent("NewSpace", description));
1574}
1575#endif // ENABLE_LOGGING_AND_PROFILING
1576
1577
1578void NewSpace::ReportStatistics() {
1579#ifdef DEBUG
1580 if (FLAG_heap_stats) {
1581 float pct = static_cast<float>(Available()) / Capacity();
1582 PrintF(" capacity: %d, available: %d, %%%d\n",
1583 Capacity(), Available(), static_cast<int>(pct*100));
1584 PrintF("\n Object Histogram:\n");
1585 for (int i = 0; i <= LAST_TYPE; i++) {
1586 if (allocated_histogram_[i].number() > 0) {
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001587 PrintF(" %-34s%10d (%10d bytes)\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001588 allocated_histogram_[i].name(),
1589 allocated_histogram_[i].number(),
1590 allocated_histogram_[i].bytes());
1591 }
1592 }
1593 PrintF("\n");
1594 }
1595#endif // DEBUG
1596
1597#ifdef ENABLE_LOGGING_AND_PROFILING
1598 if (FLAG_log_gc) {
1599 DoReportStatistics(allocated_histogram_, "allocated");
1600 DoReportStatistics(promoted_histogram_, "promoted");
1601 }
1602#endif // ENABLE_LOGGING_AND_PROFILING
1603}
1604
1605
1606void NewSpace::RecordAllocation(HeapObject* obj) {
1607 InstanceType type = obj->map()->instance_type();
1608 ASSERT(0 <= type && type <= LAST_TYPE);
1609 allocated_histogram_[type].increment_number(1);
1610 allocated_histogram_[type].increment_bytes(obj->Size());
1611}
1612
1613
1614void NewSpace::RecordPromotion(HeapObject* obj) {
1615 InstanceType type = obj->map()->instance_type();
1616 ASSERT(0 <= type && type <= LAST_TYPE);
1617 promoted_histogram_[type].increment_number(1);
1618 promoted_histogram_[type].increment_bytes(obj->Size());
1619}
1620#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1621
1622
1623// -----------------------------------------------------------------------------
1624// Free lists for old object spaces implementation
1625
1626void FreeListNode::set_size(int size_in_bytes) {
1627 ASSERT(size_in_bytes > 0);
1628 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1629
1630 // We write a map and possibly size information to the block. If the block
1631 // is big enough to be a ByteArray with at least one extra word (the next
1632 // pointer), we set its map to be the byte array map and its size to an
1633 // appropriate array length for the desired size from HeapObject::Size().
1634 // If the block is too small (e.g. one or two words) to hold both a size
1635 // field and a next pointer, we give it a filler map that gives it the
1636 // correct size.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001637 if (size_in_bytes > ByteArray::kAlignedSize) {
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001638 set_map(Heap::raw_unchecked_byte_array_map());
ager@chromium.org3811b432009-10-28 14:53:37 +00001639 // Can't use ByteArray::cast because it fails during deserialization.
1640 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1641 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001642 } else if (size_in_bytes == kPointerSize) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001643 set_map(Heap::raw_unchecked_one_pointer_filler_map());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001644 } else if (size_in_bytes == 2 * kPointerSize) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001645 set_map(Heap::raw_unchecked_two_pointer_filler_map());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001646 } else {
1647 UNREACHABLE();
1648 }
ager@chromium.org3811b432009-10-28 14:53:37 +00001649 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1650 // deserialization because the byte array map is not done yet.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001651}
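// A worked example of the free-block encoding above (sizes assume a 32-bit
// build with 4-byte pointers and are for illustration only):
//  - a 40-byte block is given the byte array map and a length chosen so that
//    HeapObject::Size() reports 40 bytes again, leaving room for the free
//    list's next pointer;
//  - a 4-byte block gets the one-pointer filler map and an 8-byte block the
//    two-pointer filler map, where the map alone encodes the size.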
1652
1653
1654Address FreeListNode::next() {
ager@chromium.org3811b432009-10-28 14:53:37 +00001655 ASSERT(IsFreeListNode(this));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001656 if (map() == Heap::raw_unchecked_byte_array_map()) {
1657 ASSERT(Size() >= kNextOffset + kPointerSize);
1658 return Memory::Address_at(address() + kNextOffset);
1659 } else {
1660 return Memory::Address_at(address() + kPointerSize);
1661 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001662}
1663
1664
1665void FreeListNode::set_next(Address next) {
ager@chromium.org3811b432009-10-28 14:53:37 +00001666 ASSERT(IsFreeListNode(this));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001667 if (map() == Heap::raw_unchecked_byte_array_map()) {
1668 ASSERT(Size() >= kNextOffset + kPointerSize);
1669 Memory::Address_at(address() + kNextOffset) = next;
1670 } else {
1671 Memory::Address_at(address() + kPointerSize) = next;
1672 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001673}
1674
1675
1676OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
1677 Reset();
1678}
1679
1680
1681void OldSpaceFreeList::Reset() {
1682 available_ = 0;
1683 for (int i = 0; i < kFreeListsLength; i++) {
1684 free_[i].head_node_ = NULL;
1685 }
1686 needs_rebuild_ = false;
1687 finger_ = kHead;
1688 free_[kHead].next_size_ = kEnd;
1689}
1690
1691
1692void OldSpaceFreeList::RebuildSizeList() {
1693 ASSERT(needs_rebuild_);
1694 int cur = kHead;
1695 for (int i = cur + 1; i < kFreeListsLength; i++) {
1696 if (free_[i].head_node_ != NULL) {
1697 free_[cur].next_size_ = i;
1698 cur = i;
1699 }
1700 }
1701 free_[cur].next_size_ = kEnd;
1702 needs_rebuild_ = false;
1703}
1704
1705
1706int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1707#ifdef DEBUG
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001708 MemoryAllocator::ZapBlock(start, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001709#endif
1710 FreeListNode* node = FreeListNode::FromAddress(start);
1711 node->set_size(size_in_bytes);
1712
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001713 // We don't use the freelists in compacting mode. This makes it more like a
1714 // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1715 // collector.
1716 if (FLAG_always_compact) {
1717 return size_in_bytes;
1718 }
1719
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001720 // Early return to drop too-small blocks on the floor (one or two word
1721 // blocks cannot hold a map pointer, a size field, and a pointer to the
1722 // next block in the free list).
1723 if (size_in_bytes < kMinBlockSize) {
1724 return size_in_bytes;
1725 }
1726
1727 // Insert other blocks at the head of an exact free list.
1728 int index = size_in_bytes >> kPointerSizeLog2;
1729 node->set_next(free_[index].head_node_);
1730 free_[index].head_node_ = node->address();
1731 available_ += size_in_bytes;
1732 needs_rebuild_ = true;
1733 return 0;
1734}
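// A minimal sketch of the exact-size indexing used above, assuming a 32-bit
// build (kPointerSizeLog2 == 2): freeing a 40-byte block computes
//   int index = 40 >> kPointerSizeLog2;  // == 10
// and pushes the node onto free_[10], the list of exactly-40-byte blocks;
// available_ grows by 40 and the size list is flagged for rebuilding.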
1735
1736
1737Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
1738 ASSERT(0 < size_in_bytes);
1739 ASSERT(size_in_bytes <= kMaxBlockSize);
1740 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1741
1742 if (needs_rebuild_) RebuildSizeList();
1743 int index = size_in_bytes >> kPointerSizeLog2;
1744 // Check for a perfect fit.
1745 if (free_[index].head_node_ != NULL) {
1746 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1747 // If this was the last block of its size, remove the size.
1748 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
1749 available_ -= size_in_bytes;
1750 *wasted_bytes = 0;
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001751 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001752 return node;
1753 }
1754 // Search the size list for the best fit.
1755 int prev = finger_ < index ? finger_ : kHead;
1756 int cur = FindSize(index, &prev);
1757 ASSERT(index < cur);
1758 if (cur == kEnd) {
1759 // No large enough size in list.
1760 *wasted_bytes = 0;
1761 return Failure::RetryAfterGC(size_in_bytes, owner_);
1762 }
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001763 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001764 int rem = cur - index;
1765 int rem_bytes = rem << kPointerSizeLog2;
1766 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
kasper.lund7276f142008-07-30 08:49:36 +00001767 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001768 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1769 size_in_bytes);
1770 // Distinguish the cases prev < rem < cur and rem <= prev < cur
1771 // to avoid many redundant tests and calls to Insert/RemoveSize.
1772 if (prev < rem) {
1773 // Simple case: insert rem between prev and cur.
1774 finger_ = prev;
1775 free_[prev].next_size_ = rem;
1776 // If this was the last block of size cur, remove the size.
1777 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1778 free_[rem].next_size_ = free_[cur].next_size_;
1779 } else {
1780 free_[rem].next_size_ = cur;
1781 }
1782 // Add the remainder block.
1783 rem_node->set_size(rem_bytes);
1784 rem_node->set_next(free_[rem].head_node_);
1785 free_[rem].head_node_ = rem_node->address();
1786 } else {
1787 // If this was the last block of size cur, remove the size.
1788 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1789 finger_ = prev;
1790 free_[prev].next_size_ = free_[cur].next_size_;
1791 }
1792 if (rem_bytes < kMinBlockSize) {
1793 // Too-small remainder is wasted.
1794 rem_node->set_size(rem_bytes);
1795 available_ -= size_in_bytes + rem_bytes;
1796 *wasted_bytes = rem_bytes;
1797 return cur_node;
1798 }
1799 // Add the remainder block and, if needed, insert its size.
1800 rem_node->set_size(rem_bytes);
1801 rem_node->set_next(free_[rem].head_node_);
1802 free_[rem].head_node_ = rem_node->address();
1803 if (rem_node->next() == NULL) InsertSize(rem);
1804 }
1805 available_ -= size_in_bytes;
1806 *wasted_bytes = 0;
1807 return cur_node;
1808}
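// A worked example of the best-fit split above (illustrative; assumes a
// 32-bit build with kPointerSizeLog2 == 2): a request for 24 bytes (index 6)
// when only a 40-byte block (index 10) is on the lists gives
//   rem = 10 - 6 = 4, rem_bytes = 4 << 2 = 16.
// The front 24 bytes of the block are returned to the caller, the 16-byte
// tail is re-inserted at the head of free_[4], and available_ drops by 24.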
1809
1810
kasper.lund7276f142008-07-30 08:49:36 +00001811#ifdef DEBUG
1812bool OldSpaceFreeList::Contains(FreeListNode* node) {
1813 for (int i = 0; i < kFreeListsLength; i++) {
1814 Address cur_addr = free_[i].head_node_;
1815 while (cur_addr != NULL) {
1816 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1817 if (cur_node == node) return true;
1818 cur_addr = cur_node->next();
1819 }
1820 }
1821 return false;
1822}
1823#endif
1824
1825
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001826FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
1827 : owner_(owner), object_size_(object_size) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001828 Reset();
1829}
1830
1831
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001832void FixedSizeFreeList::Reset() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001833 available_ = 0;
1834 head_ = NULL;
1835}
1836
1837
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001838void FixedSizeFreeList::Free(Address start) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001839#ifdef DEBUG
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001840 MemoryAllocator::ZapBlock(start, object_size_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001841#endif
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001842 // We only use the freelists with mark-sweep.
1843 ASSERT(!MarkCompactCollector::IsCompacting());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001844 FreeListNode* node = FreeListNode::FromAddress(start);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001845 node->set_size(object_size_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001846 node->set_next(head_);
1847 head_ = node->address();
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001848 available_ += object_size_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001849}
1850
1851
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001852Object* FixedSizeFreeList::Allocate() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001853 if (head_ == NULL) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001854 return Failure::RetryAfterGC(object_size_, owner_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001855 }
1856
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001857 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001858 FreeListNode* node = FreeListNode::FromAddress(head_);
1859 head_ = node->next();
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001860 available_ -= object_size_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001861 return node;
1862}
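// Usage sketch (the variable name is hypothetical): the fixed-size list is a
// plain LIFO chain of equally sized blocks, so
//   list.Free(addr);
//   Object* block = list.Allocate();  // returns the node at addr again
// adjusts available_ by object_size_ in each direction and hands back the
// block that was freed most recently.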
1863
1864
1865// -----------------------------------------------------------------------------
1866// OldSpace implementation
1867
1868void OldSpace::PrepareForMarkCompact(bool will_compact) {
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001869 // Call prepare of the super class.
1870 PagedSpace::PrepareForMarkCompact(will_compact);
1871
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001872 if (will_compact) {
1873 // Reset relocation info. During a compacting collection, everything in
1874 // the space is considered 'available' and we will rediscover live data
1875 // and waste during the collection.
1876 MCResetRelocationInfo();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001877 ASSERT(Available() == Capacity());
1878 } else {
1879 // During a non-compacting collection, everything below the linear
1880 // allocation pointer is considered allocated (everything above is
1881 // available) and we will rediscover available and wasted bytes during
1882 // the collection.
1883 accounting_stats_.AllocateBytes(free_list_.available());
1884 accounting_stats_.FillWastedBytes(Waste());
1885 }
1886
kasper.lund7276f142008-07-30 08:49:36 +00001887 // Clear the free list before a full GC---it will be rebuilt afterward.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001888 free_list_.Reset();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001889}
1890
1891
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001892void OldSpace::MCCommitRelocationInfo() {
1893 // Update fast allocation info.
1894 allocation_info_.top = mc_forwarding_info_.top;
1895 allocation_info_.limit = mc_forwarding_info_.limit;
kasper.lund7276f142008-07-30 08:49:36 +00001896 ASSERT(allocation_info_.VerifyPagedAllocation());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001897
1898 // The space is compacted and we haven't yet built free lists or
1899 // wasted any space.
1900 ASSERT(Waste() == 0);
1901 ASSERT(AvailableFree() == 0);
1902
1903 // Build the free list for the space.
1904 int computed_size = 0;
1905 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
1906 while (it.has_next()) {
1907 Page* p = it.next();
1908 // Space below the relocation pointer is allocated.
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001909 computed_size +=
1910 static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001911 if (it.has_next()) {
1912 // Free the space at the top of the page. We cannot use
1913 // p->mc_relocation_top after the call to Free (because Free will clear
1914 // remembered set bits).
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001915 int extra_size =
1916 static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001917 if (extra_size > 0) {
1918 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
1919 // The bytes we have just "freed" to add to the free list were
1920 // already accounted as available.
1921 accounting_stats_.WasteBytes(wasted_bytes);
1922 }
1923 }
1924 }
1925
1926 // Make sure the computed size - based on the used portion of the pages in
1927 // use - matches the size obtained while computing forwarding addresses.
1928 ASSERT(computed_size == Size());
1929}
1930
1931
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001932bool NewSpace::ReserveSpace(int bytes) {
1933 // We can't reliably unpack a partial snapshot that needs more new space
1934 // than the minimum NewSpace size.
1935 ASSERT(bytes <= InitialCapacity());
1936 Address limit = allocation_info_.limit;
1937 Address top = allocation_info_.top;
1938 return limit - top >= bytes;
1939}
1940
1941
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001942void PagedSpace::FreePages(Page* prev, Page* last) {
1943 if (last == AllocationTopPage()) {
1944 // Pages are already at the end of used pages.
1945 return;
1946 }
1947
1948 Page* first = NULL;
1949
1950 // Remove pages from the list.
1951 if (prev == NULL) {
1952 first = first_page_;
1953 first_page_ = last->next_page();
1954 } else {
1955 first = prev->next_page();
1956 MemoryAllocator::SetNextPage(prev, last->next_page());
1957 }
1958
1959 // Attach it after the last page.
1960 MemoryAllocator::SetNextPage(last_page_, first);
1961 last_page_ = last;
1962 MemoryAllocator::SetNextPage(last, NULL);
1963
1964 // Clean them up.
1965 do {
1966 first->ClearRSet();
1967 first = first->next_page();
1968 } while (first != NULL);
1969
1970 // Order of pages in this space might no longer be consistent with
1971 // order of pages in chunks.
1972 page_list_is_chunk_ordered_ = false;
1973}
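// A worked example of the splice above (page names are illustrative): with
// the page list A -> B -> C -> D -> E, prev == A and last == C, pages B..C
// are unlinked (A now points at D), appended after the old last page E, and
// their remembered sets cleared, leaving A -> D -> E -> B -> C with
// last_page_ == C.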
1974
1975
1976void PagedSpace::PrepareForMarkCompact(bool will_compact) {
1977 if (will_compact) {
1978 // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
1979 // to skip unused pages. Update flag value for all pages in space.
1980 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
1981 Page* last_in_use = AllocationTopPage();
1982 bool in_use = true;
1983
1984 while (all_pages_iterator.has_next()) {
1985 Page* p = all_pages_iterator.next();
1986 p->SetWasInUseBeforeMC(in_use);
1987 if (p == last_in_use) {
1988 // We passed the page containing the allocation top. All subsequent
1989 // pages are unused.
1990 in_use = false;
1991 }
1992 }
1993
1994 if (!page_list_is_chunk_ordered_) {
1995 Page* new_last_in_use = Page::FromAddress(NULL);
1996 MemoryAllocator::RelinkPageListInChunkOrder(this,
1997 &first_page_,
1998 &last_page_,
1999 &new_last_in_use);
2000 ASSERT(new_last_in_use->is_valid());
2001
2002 if (new_last_in_use != last_in_use) {
2003 // The current allocation top points to a page that is now in the middle
2004 // of the page list. We should move the allocation top forward to the new
2005 // last used page so object iterators will continue to work properly.
2006
2007 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2008 last_in_use->AllocationTop());
2009
2010 if (size_in_bytes > 0) {
2011 // There is still some space left on this page. Create a fake
2012 // object which will occupy all free space on this page.
2013 // Otherwise iterators would not be able to scan this page
2014 // correctly.
2015
2016 Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
2017 size_in_bytes);
2018 }
2019
2020 // The new last-in-use page was in the middle of the list before
2021 // sorting, so it is full.
2022 SetTop(new_last_in_use->AllocationTop());
2023
2024 ASSERT(AllocationTopPage() == new_last_in_use);
2025 ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2026 }
2027
2028 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2029 while (pages_in_use_iterator.has_next()) {
2030 Page* p = pages_in_use_iterator.next();
2031 if (!p->WasInUseBeforeMC()) {
2032 // Empty page is in the middle of a sequence of used pages.
2033 // Create a fake object which will occupy all free space on this page.
2034 // Otherwise iterators would not be able to scan this page correctly.
2035 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2036 p->ObjectAreaStart());
2037
2038 Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
2039 }
2040 }
2041
2042 page_list_is_chunk_ordered_ = true;
2043 }
2044 }
2045}
2046
2047
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002048bool PagedSpace::ReserveSpace(int bytes) {
2049 Address limit = allocation_info_.limit;
2050 Address top = allocation_info_.top;
2051 if (limit - top >= bytes) return true;
2052
2053 // There wasn't enough space in the current page. Let's put the rest
2054 // of the page on the free list and start a fresh page.
2055 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2056
2057 Page* reserved_page = TopPageOf(allocation_info_);
2058 int bytes_left_to_reserve = bytes;
2059 while (bytes_left_to_reserve > 0) {
2060 if (!reserved_page->next_page()->is_valid()) {
2061 if (Heap::OldGenerationAllocationLimitReached()) return false;
2062 Expand(reserved_page);
2063 }
2064 bytes_left_to_reserve -= Page::kPageSize;
2065 reserved_page = reserved_page->next_page();
2066 if (!reserved_page->is_valid()) return false;
2067 }
2068 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
2069 SetAllocationInfo(&allocation_info_,
2070 TopPageOf(allocation_info_)->next_page());
2071 return true;
2072}
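// A minimal usage sketch (the request size is illustrative): after the tail
// of the current page has been put on the free list,
//   ReserveSpace(2 * Page::kPageSize)
// walks forward from the page holding the allocation top, expanding the
// space until two further pages exist, and then moves the allocation info to
// the next page, so a caller that reserves space up front (e.g. the
// deserializer) cannot run out of linear allocation room.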
2073
2074
2075// You have to call this last, since the implementation from PagedSpace
2076// doesn't know that memory was 'promised' to large object space.
2077bool LargeObjectSpace::ReserveSpace(int bytes) {
2078 return Heap::OldGenerationSpaceAvailable() >= bytes;
2079}
2080
2081
kasper.lund7276f142008-07-30 08:49:36 +00002082// Slow case for normal allocation. Try in order: (1) allocate in the next
2083// page in the space, (2) allocate off the space's free list, (3) expand the
2084// space, (4) fail.
2085HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2086 // Linear allocation in this space has failed. If there is another page
2087 // in the space, move to that page and allocate there. This allocation
2088 // should succeed (size_in_bytes should not be greater than a page's
2089 // object area size).
2090 Page* current_page = TopPageOf(allocation_info_);
2091 if (current_page->next_page()->is_valid()) {
2092 return AllocateInNextPage(current_page, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002093 }
kasper.lund7276f142008-07-30 08:49:36 +00002094
ager@chromium.org3811b432009-10-28 14:53:37 +00002095 // There is no next page in this space. Try free list allocation unless that
2096 // is currently forbidden.
2097 if (!Heap::linear_allocation()) {
2098 int wasted_bytes;
2099 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
2100 accounting_stats_.WasteBytes(wasted_bytes);
2101 if (!result->IsFailure()) {
2102 accounting_stats_.AllocateBytes(size_in_bytes);
2103 return HeapObject::cast(result);
2104 }
kasper.lund7276f142008-07-30 08:49:36 +00002105 }
2106
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002107 // Free list allocation failed and there is no next page. Fail if we have
2108 // hit the old generation size limit that should cause a garbage
2109 // collection.
2110 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2111 return NULL;
2112 }
2113
2114 // Try to expand the space and allocate in the new next page.
kasper.lund7276f142008-07-30 08:49:36 +00002115 ASSERT(!current_page->next_page()->is_valid());
2116 if (Expand(current_page)) {
2117 return AllocateInNextPage(current_page, size_in_bytes);
2118 }
2119
2120 // Finally, fail.
2121 return NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002122}
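// A minimal sketch of how this slow path pairs with the bump-pointer fast
// path (illustrative only; the real fast path is the inline allocator, not
// this code):
//   Address top = allocation_info_.top;
//   if (allocation_info_.limit - top >= size_in_bytes) {
//     allocation_info_.top = top + size_in_bytes;  // linear bump
//     return HeapObject::FromAddress(top);
//   }
//   return SlowAllocateRaw(size_in_bytes);  // steps (1)-(4) above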
2123
2124
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002125void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002126 int free_size =
2127 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
kasper.lund7276f142008-07-30 08:49:36 +00002128 if (free_size > 0) {
2129 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2130 accounting_stats_.WasteBytes(wasted_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002131 }
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002132}
2133
2134
2135void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
2136 int free_size =
2137 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2138 // In the fixed space free list all the free list items have the right size.
2139 // We use up the rest of the page while preserving this invariant.
2140 while (free_size >= object_size_in_bytes_) {
2141 free_list_.Free(allocation_info_.top);
2142 allocation_info_.top += object_size_in_bytes_;
2143 free_size -= object_size_in_bytes_;
2144 accounting_stats_.WasteBytes(object_size_in_bytes_);
2145 }
2146}
2147
2148
2149// Add the block at the top of the page to the space's free list, set the
2150 // allocation info to the next page (there is assumed to be one), and allocate
2151// linearly there.
2152HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2153 int size_in_bytes) {
2154 ASSERT(current_page->next_page()->is_valid());
2155 PutRestOfCurrentPageOnFreeList(current_page);
kasper.lund7276f142008-07-30 08:49:36 +00002156 SetAllocationInfo(&allocation_info_, current_page->next_page());
2157 return AllocateLinearly(&allocation_info_, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002158}
2159
2160
2161#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002162struct CommentStatistic {
2163 const char* comment;
2164 int size;
2165 int count;
2166 void Clear() {
2167 comment = NULL;
2168 size = 0;
2169 count = 0;
2170 }
2171};
2172
2173
2174// Must be small, since a linear iteration is used for lookup.
2175const int kMaxComments = 64;
2176static CommentStatistic comments_statistics[kMaxComments+1];
2177
2178
2179void PagedSpace::ReportCodeStatistics() {
2180 ReportCodeKindStatistics();
2181 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2182 "count (average)\"):\n");
2183 for (int i = 0; i <= kMaxComments; i++) {
2184 const CommentStatistic& cs = comments_statistics[i];
2185 if (cs.size > 0) {
2186 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2187 cs.size/cs.count);
2188 }
2189 }
2190 PrintF("\n");
2191}
2192
2193
2194void PagedSpace::ResetCodeStatistics() {
2195 ClearCodeKindStatistics();
2196 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
2197 comments_statistics[kMaxComments].comment = "Unknown";
2198 comments_statistics[kMaxComments].size = 0;
2199 comments_statistics[kMaxComments].count = 0;
2200}
2201
2202
2203// Adds a comment to the 'comments_statistics' table. Performance is OK as
2204// long as 'kMaxComments' is small.
2205static void EnterComment(const char* comment, int delta) {
2206 // Do not count empty comments
2207 if (delta <= 0) return;
2208 CommentStatistic* cs = &comments_statistics[kMaxComments];
2209 // Search for a free or matching entry in 'comments_statistics': 'cs'
2210 // points to result.
2211 for (int i = 0; i < kMaxComments; i++) {
2212 if (comments_statistics[i].comment == NULL) {
2213 cs = &comments_statistics[i];
2214 cs->comment = comment;
2215 break;
2216 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2217 cs = &comments_statistics[i];
2218 break;
2219 }
2220 }
2221 // Update entry for 'comment'
2222 cs->size += delta;
2223 cs->count += 1;
2224}
2225
2226
2227// Call for each nested comment start (start marked with '[ xxx', end marked
2228// with ']'). RelocIterator 'it' must point to a comment reloc info.
2229static void CollectCommentStatistics(RelocIterator* it) {
2230 ASSERT(!it->done());
ager@chromium.org236ad962008-09-25 09:45:57 +00002231 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002232 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2233 if (tmp[0] != '[') {
2234 // Not a nested comment; skip
2235 return;
2236 }
2237
2238 // Search for end of nested comment or a new nested comment
2239 const char* const comment_txt =
2240 reinterpret_cast<const char*>(it->rinfo()->data());
2241 const byte* prev_pc = it->rinfo()->pc();
2242 int flat_delta = 0;
2243 it->next();
2244 while (true) {
2245 // All nested comments must be terminated properly, and therefore exit
2246 // from loop.
2247 ASSERT(!it->done());
ager@chromium.org236ad962008-09-25 09:45:57 +00002248 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002249 const char* const txt =
2250 reinterpret_cast<const char*>(it->rinfo()->data());
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002251 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002252 if (txt[0] == ']') break; // End of nested comment
2253 // A new comment
2254 CollectCommentStatistics(it);
2255 // Skip code that was covered with previous comment
2256 prev_pc = it->rinfo()->pc();
2257 }
2258 it->next();
2259 }
2260 EnterComment(comment_txt, flat_delta);
2261}
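// Attribution example for the recursion above (the layout is illustrative):
// given reloc comments laid out as
//   "[ Outer"  <code A>  "[ Inner"  <code B>  "]"  <code C>  "]"
// the inner call credits "[ Inner" with the size of B, while the outer call
// credits "[ Outer" with A + C only, because prev_pc is advanced past the
// nested region once the recursive call returns.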
2262
2263
2264// Collects code size statistics:
2265// - by code kind
2266// - by code comment
2267void PagedSpace::CollectCodeStatistics() {
2268 HeapObjectIterator obj_it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002269 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002270 if (obj->IsCode()) {
2271 Code* code = Code::cast(obj);
2272 code_kind_statistics[code->kind()] += code->Size();
2273 RelocIterator it(code);
2274 int delta = 0;
2275 const byte* prev_pc = code->instruction_start();
2276 while (!it.done()) {
ager@chromium.org236ad962008-09-25 09:45:57 +00002277 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002278 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002279 CollectCommentStatistics(&it);
2280 prev_pc = it.rinfo()->pc();
2281 }
2282 it.next();
2283 }
2284
2285 ASSERT(code->instruction_start() <= prev_pc &&
2286 prev_pc <= code->relocation_start());
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002287 delta += static_cast<int>(code->relocation_start() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002288 EnterComment("NoComment", delta);
2289 }
2290 }
2291}
2292
2293
2294void OldSpace::ReportStatistics() {
2295 int pct = Available() * 100 / Capacity();
2296 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2297 Capacity(), Waste(), Available(), pct);
2298
2299 // Report remembered set statistics.
2300 int rset_marked_pointers = 0;
2301 int rset_marked_arrays = 0;
2302 int rset_marked_array_elements = 0;
2303 int cross_gen_pointers = 0;
2304 int cross_gen_array_elements = 0;
2305
2306 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2307 while (page_it.has_next()) {
2308 Page* p = page_it.next();
2309
2310 for (Address rset_addr = p->RSetStart();
2311 rset_addr < p->RSetEnd();
2312 rset_addr += kIntSize) {
2313 int rset = Memory::int_at(rset_addr);
2314 if (rset != 0) {
2315 // Bits were set
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002316 int intoff =
2317 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002318 int bitoff = 0;
2319 for (; bitoff < kBitsPerInt; ++bitoff) {
2320 if ((rset & (1 << bitoff)) != 0) {
2321 int bitpos = intoff*kBitsPerByte + bitoff;
2322 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2323 Object** obj = reinterpret_cast<Object**>(slot);
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00002324 if (*obj == Heap::raw_unchecked_fixed_array_map()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002325 rset_marked_arrays++;
2326 FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
2327
2328 rset_marked_array_elements += fa->length();
2329 // Manually inline FixedArray::IterateBody
2330 Address elm_start = slot + FixedArray::kHeaderSize;
2331 Address elm_stop = elm_start + fa->length() * kPointerSize;
2332 for (Address elm_addr = elm_start;
2333 elm_addr < elm_stop; elm_addr += kPointerSize) {
2334 // Filter non-heap-object pointers
2335 Object** elm_p = reinterpret_cast<Object**>(elm_addr);
2336 if (Heap::InNewSpace(*elm_p))
2337 cross_gen_array_elements++;
2338 }
2339 } else {
2340 rset_marked_pointers++;
2341 if (Heap::InNewSpace(*obj))
2342 cross_gen_pointers++;
2343 }
2344 }
2345 }
2346 }
2347 }
2348 }
2349
2350 pct = rset_marked_pointers == 0 ?
2351 0 : cross_gen_pointers * 100 / rset_marked_pointers;
2352 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
2353 rset_marked_pointers, cross_gen_pointers, pct);
2354 PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
2355 PrintF(" elements %d, ", rset_marked_array_elements);
2356 pct = rset_marked_array_elements == 0 ? 0
2357 : cross_gen_array_elements * 100 / rset_marked_array_elements;
2358 PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
2359 PrintF(" total rset-marked bits %d\n",
2360 (rset_marked_pointers + rset_marked_arrays));
2361 pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
2362 : (cross_gen_pointers + cross_gen_array_elements) * 100 /
2363 (rset_marked_pointers + rset_marked_array_elements);
2364 PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
2365 (rset_marked_pointers + rset_marked_array_elements),
2366 (cross_gen_pointers + cross_gen_array_elements),
2367 pct);
2368
2369 ClearHistograms();
2370 HeapObjectIterator obj_it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002371 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2372 CollectHistogramInfo(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002373 ReportHistogram(true);
2374}
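// Remembered-set geometry behind the scan above, as a worked example
// (assumes a 32-bit build with kObjectAlignmentBits == 2 and a 32-bit rset
// word): bit 'bitoff' of the word at byte offset 'intoff' into the rset
// covers the slot at page offset
//   (intoff * kBitsPerByte + bitoff) << kObjectAlignmentBits
// so each bit guards one 4-byte word and each rset word covers 128 bytes of
// the page.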
2375
2376
2377// Dump the range of remembered set words between [start, end) corresponding
2378// to the pointers starting at object_p. The allocation_top is an object
2379// pointer which should not be read past. This is important for large object
2380// pages, where some bits in the remembered set range do not correspond to
2381// allocated addresses.
2382static void PrintRSetRange(Address start, Address end, Object** object_p,
2383 Address allocation_top) {
2384 Address rset_address = start;
2385
2386 // If the range starts on an odd-numbered word (e.g. for large object extra
2387 // remembered set ranges), print some spaces.
ager@chromium.org9085a012009-05-11 19:22:57 +00002388 if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002389 PrintF(" ");
2390 }
2391
2392 // Loop over all the words in the range.
2393 while (rset_address < end) {
2394 uint32_t rset_word = Memory::uint32_at(rset_address);
2395 int bit_position = 0;
2396
2397 // Loop over all the bits in the word.
2398 while (bit_position < kBitsPerInt) {
2399 if (object_p == reinterpret_cast<Object**>(allocation_top)) {
2400 // Print a bar at the allocation pointer.
2401 PrintF("|");
2402 } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
2403 // Do not dereference object_p past the allocation pointer.
2404 PrintF("#");
2405 } else if ((rset_word & (1 << bit_position)) == 0) {
2406 // Print a dot for zero bits.
2407 PrintF(".");
2408 } else if (Heap::InNewSpace(*object_p)) {
2409 // Print an X for one bits for pointers to new space.
2410 PrintF("X");
2411 } else {
2412 // Print a circle for one bits for pointers to old space.
2413 PrintF("o");
2414 }
2415
2416 // Print a space after every 8th bit except the last.
2417 if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
2418 PrintF(" ");
2419 }
2420
2421 // Advance to next bit.
2422 bit_position++;
2423 object_p++;
2424 }
2425
2426 // Print a newline after every odd numbered word, otherwise a space.
ager@chromium.org9085a012009-05-11 19:22:57 +00002427 if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002428 PrintF("\n");
2429 } else {
2430 PrintF(" ");
2431 }
2432
2433 // Advance to next remembered set word.
2434 rset_address += kIntSize;
2435 }
2436}
2437
2438
2439void PagedSpace::DoPrintRSet(const char* space_name) {
2440 PageIterator it(this, PageIterator::PAGES_IN_USE);
2441 while (it.has_next()) {
2442 Page* p = it.next();
2443 PrintF("%s page 0x%x:\n", space_name, p);
2444 PrintRSetRange(p->RSetStart(), p->RSetEnd(),
2445 reinterpret_cast<Object**>(p->ObjectAreaStart()),
2446 p->AllocationTop());
2447 PrintF("\n");
2448 }
2449}
2450
2451
2452void OldSpace::PrintRSet() { DoPrintRSet("old"); }
2453#endif
2454
2455// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002456// FixedSpace implementation
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002457
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002458void FixedSpace::PrepareForMarkCompact(bool will_compact) {
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002459 // Call prepare of the super class.
2460 PagedSpace::PrepareForMarkCompact(will_compact);
2461
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002462 if (will_compact) {
2463 // Reset relocation info.
2464 MCResetRelocationInfo();
2465
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002466 // During a compacting collection, everything in the space is considered
2467 // 'available' (set by the call to MCResetRelocationInfo) and we will
2468 // rediscover live and wasted bytes during the collection.
2469 ASSERT(Available() == Capacity());
2470 } else {
2471 // During a non-compacting collection, everything below the linear
2472 // allocation pointer except wasted top-of-page blocks is considered
2473 // allocated and we will rediscover available bytes during the
2474 // collection.
2475 accounting_stats_.AllocateBytes(free_list_.available());
2476 }
2477
kasper.lund7276f142008-07-30 08:49:36 +00002478 // Clear the free list before a full GC---it will be rebuilt afterward.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002479 free_list_.Reset();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002480}
2481
2482
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002483void FixedSpace::MCCommitRelocationInfo() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002484 // Update fast allocation info.
2485 allocation_info_.top = mc_forwarding_info_.top;
2486 allocation_info_.limit = mc_forwarding_info_.limit;
kasper.lund7276f142008-07-30 08:49:36 +00002487 ASSERT(allocation_info_.VerifyPagedAllocation());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002488
2489 // The space is compacted and we haven't yet wasted any space.
2490 ASSERT(Waste() == 0);
2491
2492 // Update allocation_top of each page in use and compute waste.
2493 int computed_size = 0;
2494 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2495 while (it.has_next()) {
2496 Page* page = it.next();
2497 Address page_top = page->AllocationTop();
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002498 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002499 if (it.has_next()) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002500 accounting_stats_.WasteBytes(
2501 static_cast<int>(page->ObjectAreaEnd() - page_top));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002502 }
2503 }
2504
2505 // Make sure the computed size - based on the used portion of the
2506 // pages in use - matches the size we adjust during allocation.
2507 ASSERT(computed_size == Size());
2508}
2509
2510
kasper.lund7276f142008-07-30 08:49:36 +00002511// Slow case for normal allocation. Try in order: (1) allocate in the next
2512// page in the space, (2) allocate off the space's free list, (3) expand the
2513// space, (4) fail.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002514HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2515 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00002516 // Linear allocation in this space has failed. If there is another page
2517 // in the space, move to that page and allocate there. This allocation
2518 // should succeed.
2519 Page* current_page = TopPageOf(allocation_info_);
2520 if (current_page->next_page()->is_valid()) {
2521 return AllocateInNextPage(current_page, size_in_bytes);
2522 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002523
ager@chromium.org3811b432009-10-28 14:53:37 +00002524 // There is no next page in this space. Try free list allocation unless
2525 // that is currently forbidden. The fixed space free list implicitly assumes
2526 // that all free blocks are of the fixed size.
2527 if (!Heap::linear_allocation()) {
kasper.lund7276f142008-07-30 08:49:36 +00002528 Object* result = free_list_.Allocate();
2529 if (!result->IsFailure()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002530 accounting_stats_.AllocateBytes(size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00002531 return HeapObject::cast(result);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002532 }
2533 }
kasper.lund7276f142008-07-30 08:49:36 +00002534
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002535 // Free list allocation failed and there is no next page. Fail if we have
2536 // hit the old generation size limit that should cause a garbage
2537 // collection.
2538 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2539 return NULL;
2540 }
2541
2542 // Try to expand the space and allocate in the new next page.
kasper.lund7276f142008-07-30 08:49:36 +00002543 ASSERT(!current_page->next_page()->is_valid());
2544 if (Expand(current_page)) {
2545 return AllocateInNextPage(current_page, size_in_bytes);
2546 }
2547
2548 // Finally, fail.
2549 return NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002550}
2551
2552
kasper.lund7276f142008-07-30 08:49:36 +00002553// Move to the next page (there is assumed to be one) and allocate there.
2554// The top of page block is always wasted, because it is too small to hold a
2555// map.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002556HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2557 int size_in_bytes) {
kasper.lund7276f142008-07-30 08:49:36 +00002558 ASSERT(current_page->next_page()->is_valid());
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002559 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002560 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2561 accounting_stats_.WasteBytes(page_extra_);
kasper.lund7276f142008-07-30 08:49:36 +00002562 SetAllocationInfo(&allocation_info_, current_page->next_page());
2563 return AllocateLinearly(&allocation_info_, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002564}
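// Illustrative note on the waste accounting above (numbers are assumptions):
// page_extra_ is the tail of a page's object area that is too small to hold
// one more fixed-size object. If the object area were 8000 bytes and
// object_size_in_bytes_ were 88, the 8000 % 88 == 80 leftover bytes would be
// wasted on every completely filled page.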
2565
2566
2567#ifdef DEBUG
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002568void FixedSpace::ReportStatistics() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002569 int pct = Available() * 100 / Capacity();
2570 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2571 Capacity(), Waste(), Available(), pct);
2572
2573 // Report remembered set statistics.
2574 int rset_marked_pointers = 0;
2575 int cross_gen_pointers = 0;
2576
2577 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2578 while (page_it.has_next()) {
2579 Page* p = page_it.next();
2580
2581 for (Address rset_addr = p->RSetStart();
2582 rset_addr < p->RSetEnd();
2583 rset_addr += kIntSize) {
2584 int rset = Memory::int_at(rset_addr);
2585 if (rset != 0) {
2586 // Bits were set
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002587 int intoff =
2588 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002589 int bitoff = 0;
2590 for (; bitoff < kBitsPerInt; ++bitoff) {
2591 if ((rset & (1 << bitoff)) != 0) {
2592 int bitpos = intoff*kBitsPerByte + bitoff;
2593 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2594 Object** obj = reinterpret_cast<Object**>(slot);
2595 rset_marked_pointers++;
2596 if (Heap::InNewSpace(*obj))
2597 cross_gen_pointers++;
2598 }
2599 }
2600 }
2601 }
2602 }
2603
2604 pct = rset_marked_pointers == 0 ?
2605 0 : cross_gen_pointers * 100 / rset_marked_pointers;
2606 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
2607 rset_marked_pointers, cross_gen_pointers, pct);
2608
2609 ClearHistograms();
2610 HeapObjectIterator obj_it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002611 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2612 CollectHistogramInfo(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002613 ReportHistogram(false);
2614}
2615
2616
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002617void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
2618#endif
2619
2620
2621// -----------------------------------------------------------------------------
2622// MapSpace implementation
2623
2624void MapSpace::PrepareForMarkCompact(bool will_compact) {
2625 // Call prepare of the super class.
2626 FixedSpace::PrepareForMarkCompact(will_compact);
2627
2628 if (will_compact) {
2629 // Initialize map index entry.
2630 int page_count = 0;
2631 PageIterator it(this, PageIterator::ALL_PAGES);
2632 while (it.has_next()) {
2633 ASSERT_MAP_PAGE_INDEX(page_count);
2634
2635 Page* p = it.next();
2636 ASSERT(p->mc_page_index == page_count);
2637
2638 page_addresses_[page_count++] = p->address();
2639 }
2640 }
2641}
2642
2643
2644#ifdef DEBUG
2645void MapSpace::VerifyObject(HeapObject* object) {
2646 // The object should be a map or a free-list node.
2647 ASSERT(object->IsMap() || object->IsByteArray());
2648}
2649#endif
2650
2651
2652// -----------------------------------------------------------------------------
2653// GlobalPropertyCellSpace implementation
2654
2655#ifdef DEBUG
2656void CellSpace::VerifyObject(HeapObject* object) {
2657 // The object should be a global object property cell or a free-list node.
2658 ASSERT(object->IsJSGlobalPropertyCell() ||
2659 object->map() == Heap::two_pointer_filler_map());
2660}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002661#endif
2662
2663
2664// -----------------------------------------------------------------------------
2665// LargeObjectIterator
2666
2667LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2668 current_ = space->first_chunk_;
2669 size_func_ = NULL;
2670}
2671
2672
2673LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2674 HeapObjectCallback size_func) {
2675 current_ = space->first_chunk_;
2676 size_func_ = size_func;
2677}
2678
2679
2680HeapObject* LargeObjectIterator::next() {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002681 if (current_ == NULL) return NULL;
2682
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002683 HeapObject* object = current_->GetObject();
2684 current_ = current_->next();
2685 return object;
2686}
2687
2688
2689// -----------------------------------------------------------------------------
2690// LargeObjectChunk
2691
2692LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
kasper.lund7276f142008-07-30 08:49:36 +00002693 size_t* chunk_size,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002694 Executability executable) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002695 size_t requested = ChunkSizeFor(size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00002696 void* mem = MemoryAllocator::AllocateRawMemory(requested,
2697 chunk_size,
2698 executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002699 if (mem == NULL) return NULL;
2700 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
2701 if (*chunk_size < requested) {
2702 MemoryAllocator::FreeRawMemory(mem, *chunk_size);
2703 LOG(DeleteEvent("LargeObjectChunk", mem));
2704 return NULL;
2705 }
2706 return reinterpret_cast<LargeObjectChunk*>(mem);
2707}
2708
2709
2710int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002711 int os_alignment = static_cast<int>(OS::AllocateAlignment());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002712 if (os_alignment < Page::kPageSize)
2713 size_in_bytes += (Page::kPageSize - os_alignment);
2714 return size_in_bytes + Page::kObjectStartOffset;
2715}
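// Worked example for the size computation above (numbers are illustrative):
// with Page::kPageSize == 8 KB and OS::AllocateAlignment() == 4 KB, a
// request for 20000 bytes becomes 20000 + (8192 - 4096) = 24096 bytes, which
// leaves room to round the chunk start up to a page boundary, plus
// Page::kObjectStartOffset for the page header itself.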
2716
2717// -----------------------------------------------------------------------------
2718// LargeObjectSpace
2719
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002720LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
2721 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
kasper.lund7276f142008-07-30 08:49:36 +00002722 first_chunk_(NULL),
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002723 size_(0),
2724 page_count_(0) {}
2725
2726
2727bool LargeObjectSpace::Setup() {
2728 first_chunk_ = NULL;
2729 size_ = 0;
2730 page_count_ = 0;
2731 return true;
2732}
2733
2734
2735void LargeObjectSpace::TearDown() {
2736 while (first_chunk_ != NULL) {
2737 LargeObjectChunk* chunk = first_chunk_;
2738 first_chunk_ = first_chunk_->next();
2739 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
2740 MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
2741 }
2742
2743 size_ = 0;
2744 page_count_ = 0;
2745}
2746
2747
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00002748#ifdef ENABLE_HEAP_PROTECTION
2749
2750void LargeObjectSpace::Protect() {
2751 LargeObjectChunk* chunk = first_chunk_;
2752 while (chunk != NULL) {
2753 MemoryAllocator::Protect(chunk->address(), chunk->size());
2754 chunk = chunk->next();
2755 }
2756}
2757
2758
2759void LargeObjectSpace::Unprotect() {
2760 LargeObjectChunk* chunk = first_chunk_;
2761 while (chunk != NULL) {
2762 bool is_code = chunk->GetObject()->IsCode();
2763 MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
2764 is_code ? EXECUTABLE : NOT_EXECUTABLE);
2765 chunk = chunk->next();
2766 }
2767}
2768
2769#endif
2770
2771
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002772Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002773 int object_size,
2774 Executability executable) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002775 ASSERT(0 < object_size && object_size <= requested_size);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002776
2777 // Check if we want to force a GC before growing the old space further.
2778 // If so, fail the allocation.
2779 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2780 return Failure::RetryAfterGC(requested_size, identity());
2781 }
2782
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002783 size_t chunk_size;
2784 LargeObjectChunk* chunk =
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002785 LargeObjectChunk::New(requested_size, &chunk_size, executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002786 if (chunk == NULL) {
kasper.lund7276f142008-07-30 08:49:36 +00002787 return Failure::RetryAfterGC(requested_size, identity());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002788 }
2789
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002790 size_ += static_cast<int>(chunk_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002791 page_count_++;
2792 chunk->set_next(first_chunk_);
2793 chunk->set_size(chunk_size);
2794 first_chunk_ = chunk;
2795
2796 // Set the object address and size in the page header and clear its
2797 // remembered set.
2798 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2799 Address object_address = page->ObjectAreaStart();
2800 // Clear the low order bit of the second word in the page to flag it as a
2801 // large object page. If the chunk_size happened to be written there, its
2802 // low order bit should already be clear.
2803 ASSERT((chunk_size & 0x1) == 0);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002804 page->SetIsLargeObjectPage(true);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002805 page->ClearRSet();
2806 int extra_bytes = requested_size - object_size;
2807 if (extra_bytes > 0) {
2808 // The extra memory for the remembered set should be cleared.
2809 memset(object_address + object_size, 0, extra_bytes);
2810 }
2811
2812 return HeapObject::FromAddress(object_address);
2813}
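// Resulting chunk layout (a sketch; exact offsets depend on the constants):
//
//   chunk->address()
//     ... alignment slack ...
//   RoundUp(chunk->address(), Page::kPageSize)  <- page header, rset cleared
//     ... Page::kObjectStartOffset ...
//   object_address = page->ObjectAreaStart()    <- object of object_size
//     ... extra_bytes zeroed for the extra remembered set (fixed arrays) ...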
2814
2815
Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             EXECUTABLE);
}


Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
  return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


// GC support
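// Returns the large object whose chunk contains address a, or
// Failure::Exception() if no chunk does.  This is a linear walk over the
// chunk list.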
Object* LargeObjectSpace::FindObject(Address a) {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= a && a < chunk_address + chunk->size()) {
      return chunk->GetObject();
    }
  }
  return Failure::Exception();
}

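// Clears both the per-page remembered set and the extra remembered set that
// trails each large fixed array.  Other large objects never hold pointers to
// the young generation and are skipped.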
void LargeObjectSpace::ClearRSet() {
  ASSERT(Page::is_rset_in_use());

  LargeObjectIterator it(this);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays need remembered set support.
    if (object->IsFixedArray()) {
      // Clear the normal remembered set region of the page.
      Page* page = Page::FromAddress(object->address());
      page->ClearRSet();

      // Clear the extra remembered set.
      int size = object->Size();
      int extra_rset_bytes = ExtraRSetBytesFor(size);
      memset(object->address() + size, 0, extra_rset_bytes);
    }
  }
}

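// Visits every remembered-set slot that belongs to a large fixed array.  The
// normal page remembered set is scanned first, followed by the extra
// remembered set stored after the array itself.  The per-array slot counts
// feed the V8.RSetLO histogram when the stats table provides one.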
void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
  ASSERT(Page::is_rset_in_use());

  static void* lo_rset_histogram = StatsTable::CreateHistogram(
      "V8.RSetLO",
      0,
      // Keeping this histogram's buckets the same as the paged space histogram.
      Page::kObjectAreaSize / kPointerSize,
      30);

  LargeObjectIterator it(this);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    if (object->IsFixedArray()) {
      // Iterate the normal page remembered set range.
      Page* page = Page::FromAddress(object->address());
      Address object_end = object->address() + object->Size();
      int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
                                         Min(page->ObjectAreaEnd(), object_end),
                                         page->RSetStart(),
                                         copy_object_func);

      // Iterate the extra array elements.
      if (object_end > page->ObjectAreaEnd()) {
        count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
                                        object_end, copy_object_func);
      }
      if (lo_rset_histogram != NULL) {
        StatsTable::AddHistogramSample(lo_rset_histogram, count);
      }
    }
  }
}

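// Sweep support for the mark-compact collector: marked objects survive and
// have their mark bit cleared; unmarked objects are unlinked from the chunk
// list and their memory is released through MemoryAllocator::FreeRawMemory().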
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      MarkCompactCollector::ReportDeleteIfNeeded(object);
      size_ -= static_cast<int>(chunk_size);
      page_count_--;
      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}

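// An object belongs to this space if the page containing it is flagged as a
// large object page.  New space addresses are rejected up front, presumably
// because new space memory is not organized into pages that carry this flag.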
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  if (Heap::new_space()->Contains(address)) {
    return false;
  }
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}

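// Debug-only support: heap verification, object printing, statistics
// reporting, and remembered set dumps for the large object space.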
#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather than using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRSetSet.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(Heap::Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (Heap::InNewSpace(element_object)) {
            ASSERT(Page::IsRSetSet(object->address(),
                                   FixedArray::kHeaderSize + j * kPointerSize));
          }
        }
      }
    }
  }
}


void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    obj->Print();
  }
}

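// Prints the total size of the space and the number of large objects, plus a
// histogram of the objects gathered via CollectHistogramInfo().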
void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(" number of objects %d\n", num_objects);
  if (num_objects > 0) ReportHistogram(false);
}

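// Accumulates the size of every large code object into code_kind_statistics,
// indexed by the code object's kind.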
void LargeObjectSpace::CollectCodeStatistics() {
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
    }
  }
}

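// Dumps the remembered set of every large fixed array: first the normal page
// remembered set range, then the extra remembered set covering the elements
// that lie beyond the page's object area.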
void LargeObjectSpace::PrintRSet() {
  LargeObjectIterator it(this);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    if (object->IsFixedArray()) {
      Page* page = Page::FromAddress(object->address());

      Address allocation_top = object->address() + object->Size();
      // Use a pointer-width format; plain %x truncates the page pointer on
      // 64-bit targets.
      PrintF("large page 0x%" V8PRIxPTR ":\n",
             reinterpret_cast<intptr_t>(page));
      PrintRSetRange(page->RSetStart(), page->RSetEnd(),
                     reinterpret_cast<Object**>(object->address()),
                     allocation_top);
      int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
      int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
                                    kBitsPerInt);
      PrintF("------------------------------------------------------------"
             "-----------\n");
      PrintRSetRange(allocation_top,
                     allocation_top + extra_rset_bits / kBitsPerByte,
                     reinterpret_cast<Object**>(object->address()
                                                + Page::kObjectAreaSize),
                     allocation_top);
      PrintF("\n");
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal