// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())


// -----------------------------------------------------------------------------
// HeapObjectIterator

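// A HeapObjectIterator walks the objects of a paged space in address order,
// from the bottom of the space (or a given start address) up to the
// allocation top, advancing page by page.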
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


bool HeapObjectIterator::HasNextInNextPage() {
  if (cur_addr_ == end_addr_) return false;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  if (cur_addr_ == end_addr_) return false;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return true;
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}


// -----------------------------------------------------------------------------
// Page

#ifdef DEBUG
Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif

// -----------------------------------------------------------------------------
// CodeRange

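// CodeRange reserves one contiguous block of virtual address space for
// generated code. Keeping all code objects inside a single reserved range
// guarantees that the difference between any two addresses in it fits in a
// signed 32-bit offset (see CompareFreeBlockAddress below).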
List<CodeRange::FreeBlock> CodeRange::free_list_(0);
List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
int CodeRange::current_allocation_block_index_ = 0;
VirtualMemory* CodeRange::code_range_ = NULL;


bool CodeRange::Setup(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(NewEvent("CodeRange", code_range_->address(), requested));
  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}

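// Advances current_allocation_block_index_ to an allocation block with at
// least 'requested' bytes. If no remaining block is large enough, the free
// list and allocation list are merged, sorted by address, and adjacent blocks
// are coalesced before searching again.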
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}


void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough. This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//
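// MemoryAllocator manages the chunks of memory that back the paged heap
// spaces. Chunk metadata is kept in the chunks_ list, and the ids of unused
// entries are tracked in free_chunk_ids_.
//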
int MemoryAllocator::capacity_ = 0;
int MemoryAllocator::size_ = 0;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;

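// Push and Pop treat free_chunk_ids_ as a stack of chunk ids that are not
// currently in use; top_ is the number of ids on the stack.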
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(int capacity) {
  capacity_ = RoundUp(capacity, Page::kPageSize);

  // Over-estimate the size of chunks_ array. It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<int>(requested) > capacity_) return NULL;
  void* mem;
  if (executable == EXECUTABLE && CodeRange::exists()) {
    mem = CodeRange::AllocateRawMemory(requested, allocated);
  } else {
    mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
  if (CodeRange::contains(static_cast<Address>(mem))) {
    CodeRange::FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  ASSERT(size_ >= 0);
}


void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size. Page::kPageSize is a power of two so we can divide by
  // shifting.
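  // For example, assuming 8K pages (kPageSizeBits == 13), a chunk starting at
  // 0x2100 with size 0x6000 covers the page-aligned range [0x4000, 0x8000),
  // i.e. two full pages.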
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}


Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee that the desired number of pages
  // can be allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size);
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}

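// Unlike AllocatePages, CommitPages does not allocate a fresh chunk from the
// OS: it commits pages inside the already reserved initial chunk, so the
// range [start, start + size) must lie entirely within that chunk.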
Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
  Counters::memory_allocated.Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
  Counters::memory_allocated.Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  Counters::memory_allocated.Decrement(static_cast<int>(size));
  return true;
}

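// Marks the pages of a freshly allocated or committed chunk as in use by
// 'owner'. Each page's opaque_header packs the (page-aligned) address of the
// next page in the chunk together with the chunk id in the low bits; the
// last page stores a NULL next-page address.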
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->is_normal_page = 1;
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}

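// Frees whole chunks starting at page 'p'. If 'p' is not the first page of
// its chunk, that chunk is kept alive (chunks can only be freed in their
// entirety) and 'p' is returned so the caller can keep it linked; all
// following chunks are deleted. Otherwise every chunk from 'p' onward is
// deleted and an invalid page is returned.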
Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'.
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'p'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // Set the next_page of last_page to NULL.
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory. Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    FreeRawMemory(c.address(), c.size());
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(int max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us. If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
                              max_capacity_ / Page::kObjectAreaSize);
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially initialize remembered sets in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->ClearRSet();
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  first_page_ = MemoryAllocator::FreePages(first_page_);
  ASSERT(!first_page_->is_valid());

  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::ClearRSet() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->ClearRSet();
  }
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'. We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


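// Encodes an address as an offset usable by the mark-compact collector: the
// page's index within the space times the page size, plus the offset of the
// address within its page.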
int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page in the page, MCSpaceOffsetForAddress considers it is in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to previous page.
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection. This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it. Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page. The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->mc_relocation_top = mc_forwarding_info_.top;
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear the remembered set of the new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->ClearRSet();
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif

void PagedSpace::Shrink() {
  // Release the pages after the allocation top.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Note that pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      // Unless this is the last page in the space containing allocated
      // objects, the allocation top should be at a constant offset from the
      // object area end.
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      } else {
        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap and
        // have their remembered set bits set if required as determined
        // by the visitor.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

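  // Because the chunk is a power of two in size and aligned to its size,
  // containment can be tested with a mask: an address is in new space
  // exactly when (addr & address_mask_) == start_; object_mask_ and
  // object_expected_ apply the same test to tagged heap object pointers.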
  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * Size());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
    return false;
  }
  committed_ = true;
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
    return false;
  }
  committed_ = false;
  return true;
}


// -----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise. In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  initial_capacity_ = initial_capacity;
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;
  committed_ = false;

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;

  return Commit();
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  int maximum_extra = maximum_capacity_ - capacity_;
  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
                  maximum_extra);
  if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
    return false;
  }
  capacity_ += extra;
  return true;
}


bool SemiSpace::GrowTo(int new_capacity) {
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  size_t delta = new_capacity - capacity_;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  size_t delta = capacity_ - new_capacity;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// A static array of histogram info for each type.
static HistogramInfo heap_histograms[LAST_TYPE+1];
static JSObject::SpillInformation js_spill_information;

// heap_histograms is shared, always clear it before using it.
static void ClearHistograms() {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  js_spill_information.Clear();
}


static int code_kind_statistics[Code::NUMBER_OF_KINDS];


static void ClearCodeKindStatistics() {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  const char* table[Code::NUMBER_OF_KINDS];

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
    }
  }

#undef CASE

  PrintF("\n Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(heap_histograms[type].name() != NULL);
  heap_histograms[type].increment_number(1);
  heap_histograms[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
  }

  return obj->Size();
}


static void ReportHistogram(bool print_spill) {
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (heap_histograms[i].number() > 0) {
      PrintF(" %-33s%10d (%10d bytes)\n",
             heap_histograms[i].name(),
             heap_histograms[i].number(),
             heap_histograms[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)    \
  string_number += heap_histograms[type].number(); \
  string_bytes += heap_histograms[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    js_spill_information.Print();
  }
}
#endif  // DEBUG


// Support for statistics gathering for --heap-stats and --log-gc.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}

// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
// flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  while (it.has_next()) RecordAllocation(it.next());
}


#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(HistogramInfo* info, const char* description) {
  LOG(HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
  string_number += info[type].number();         \
  string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(HeapSampleEndEvent("NewSpace", description));
}
#endif  // ENABLE_LOGGING_AND_PROFILING


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF(" capacity: %d, available: %d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-33s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) {
    DoReportStatistics(allocated_histogram_, "allocated");
    DoReportStatistics(promoted_histogram_, "promoted");
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block. If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
1527 // If the block is too small (e.g., one or two words) to hold both a size
1528 // field and a next pointer, we give it a filler map that gives it the
1529 // correct size.
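  // Resulting encodings (illustrative sketch):
  //   larger than ByteArray::kAlignedSize: byte array map + length field; the
  //     next pointer, if any, is stored at kNextOffset (see next()/set_next()).
  //   two words: two-pointer filler map; the next pointer, if any, occupies
  //     the second word.
  //   one word: one-pointer filler map only.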
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001530 if (size_in_bytes > ByteArray::kAlignedSize) {
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001531 set_map(Heap::raw_unchecked_byte_array_map());
ager@chromium.org3811b432009-10-28 14:53:37 +00001532 // Can't use ByteArray::cast because it fails during deserialization.
1533 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1534 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001535 } else if (size_in_bytes == kPointerSize) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001536 set_map(Heap::raw_unchecked_one_pointer_filler_map());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001537 } else if (size_in_bytes == 2 * kPointerSize) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001538 set_map(Heap::raw_unchecked_two_pointer_filler_map());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001539 } else {
1540 UNREACHABLE();
1541 }
ager@chromium.org3811b432009-10-28 14:53:37 +00001542 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1543 // deserialization because the byte array map has not yet been installed.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001544}
1545
1546
1547Address FreeListNode::next() {
ager@chromium.org3811b432009-10-28 14:53:37 +00001548 ASSERT(IsFreeListNode(this));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001549 if (map() == Heap::raw_unchecked_byte_array_map()) {
1550 ASSERT(Size() >= kNextOffset + kPointerSize);
1551 return Memory::Address_at(address() + kNextOffset);
1552 } else {
1553 return Memory::Address_at(address() + kPointerSize);
1554 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001555}
1556
1557
1558void FreeListNode::set_next(Address next) {
ager@chromium.org3811b432009-10-28 14:53:37 +00001559 ASSERT(IsFreeListNode(this));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001560 if (map() == Heap::raw_unchecked_byte_array_map()) {
1561 ASSERT(Size() >= kNextOffset + kPointerSize);
1562 Memory::Address_at(address() + kNextOffset) = next;
1563 } else {
1564 Memory::Address_at(address() + kPointerSize) = next;
1565 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001566}
1567
1568
1569OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
1570 Reset();
1571}
1572
1573
1574void OldSpaceFreeList::Reset() {
1575 available_ = 0;
1576 for (int i = 0; i < kFreeListsLength; i++) {
1577 free_[i].head_node_ = NULL;
1578 }
1579 needs_rebuild_ = false;
1580 finger_ = kHead;
1581 free_[kHead].next_size_ = kEnd;
1582}
1583
1584
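// Rebuilds the ascending chain of non-empty size classes that is threaded
// through the next_size_ fields, starting at kHead.  Illustrative example
// (assuming 4-byte pointers): if only the 12-byte and 40-byte lists are
// non-empty, the chain becomes kHead -> 3 -> 10 -> kEnd, since free_ is
// indexed by block size in words.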
1585void OldSpaceFreeList::RebuildSizeList() {
1586 ASSERT(needs_rebuild_);
1587 int cur = kHead;
1588 for (int i = cur + 1; i < kFreeListsLength; i++) {
1589 if (free_[i].head_node_ != NULL) {
1590 free_[cur].next_size_ = i;
1591 cur = i;
1592 }
1593 }
1594 free_[cur].next_size_ = kEnd;
1595 needs_rebuild_ = false;
1596}
1597
1598
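// Adds the block [start, start + size_in_bytes) to the free list.  Returns the
// number of bytes that could not be reused and should be accounted as waste:
// the whole block when FLAG_always_compact is set or when the block is smaller
// than kMinBlockSize, zero otherwise.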
1599int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1600#ifdef DEBUG
1601 for (int i = 0; i < size_in_bytes; i += kPointerSize) {
1602 Memory::Address_at(start + i) = kZapValue;
1603 }
1604#endif
1605 FreeListNode* node = FreeListNode::FromAddress(start);
1606 node->set_size(size_in_bytes);
1607
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001608 // We don't use the freelists in compacting mode. This makes it more like a
1609 // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1610 // collector.
1611 if (FLAG_always_compact) {
1612 return size_in_bytes;
1613 }
1614
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001615 // Early return to drop too-small blocks on the floor (one or two word
1616 // blocks cannot hold a map pointer, a size field, and a pointer to the
1617 // next block in the free list).
1618 if (size_in_bytes < kMinBlockSize) {
1619 return size_in_bytes;
1620 }
1621
1622 // Insert other blocks at the head of an exact free list.
1623 int index = size_in_bytes >> kPointerSizeLog2;
1624 node->set_next(free_[index].head_node_);
1625 free_[index].head_node_ = node->address();
1626 available_ += size_in_bytes;
1627 needs_rebuild_ = true;
1628 return 0;
1629}
1630
1631
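// Allocation strategy (sketch): first try the exact-size list; failing that,
// walk the size chain from the finger to find the smallest block that fits and
// split it, putting any usable remainder back on its exact-size list.
// *wasted_bytes reports a remainder that was too small (< kMinBlockSize) to be
// reused.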
1632Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
1633 ASSERT(0 < size_in_bytes);
1634 ASSERT(size_in_bytes <= kMaxBlockSize);
1635 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1636
1637 if (needs_rebuild_) RebuildSizeList();
1638 int index = size_in_bytes >> kPointerSizeLog2;
1639 // Check for a perfect fit.
1640 if (free_[index].head_node_ != NULL) {
1641 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1642 // If this was the last block of its size, remove the size.
1643 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
1644 available_ -= size_in_bytes;
1645 *wasted_bytes = 0;
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001646 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001647 return node;
1648 }
1649 // Search the size list for the best fit.
1650 int prev = finger_ < index ? finger_ : kHead;
1651 int cur = FindSize(index, &prev);
1652 ASSERT(index < cur);
1653 if (cur == kEnd) {
1654 // No large enough size in list.
1655 *wasted_bytes = 0;
1656 return Failure::RetryAfterGC(size_in_bytes, owner_);
1657 }
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001658 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001659 int rem = cur - index;
1660 int rem_bytes = rem << kPointerSizeLog2;
1661 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
kasper.lund7276f142008-07-30 08:49:36 +00001662 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001663 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1664 size_in_bytes);
1665 // Distinguish the cases prev < rem < cur and rem <= prev < cur
1666 // to avoid many redundant tests and calls to Insert/RemoveSize.
1667 if (prev < rem) {
1668 // Simple case: insert rem between prev and cur.
1669 finger_ = prev;
1670 free_[prev].next_size_ = rem;
1671 // If this was the last block of size cur, remove the size.
1672 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1673 free_[rem].next_size_ = free_[cur].next_size_;
1674 } else {
1675 free_[rem].next_size_ = cur;
1676 }
1677 // Add the remainder block.
1678 rem_node->set_size(rem_bytes);
1679 rem_node->set_next(free_[rem].head_node_);
1680 free_[rem].head_node_ = rem_node->address();
1681 } else {
1682 // If this was the last block of size cur, remove the size.
1683 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1684 finger_ = prev;
1685 free_[prev].next_size_ = free_[cur].next_size_;
1686 }
1687 if (rem_bytes < kMinBlockSize) {
1688 // Too-small remainder is wasted.
1689 rem_node->set_size(rem_bytes);
1690 available_ -= size_in_bytes + rem_bytes;
1691 *wasted_bytes = rem_bytes;
1692 return cur_node;
1693 }
1694 // Add the remainder block and, if needed, insert its size.
1695 rem_node->set_size(rem_bytes);
1696 rem_node->set_next(free_[rem].head_node_);
1697 free_[rem].head_node_ = rem_node->address();
1698 if (rem_node->next() == NULL) InsertSize(rem);
1699 }
1700 available_ -= size_in_bytes;
1701 *wasted_bytes = 0;
1702 return cur_node;
1703}
1704
1705
kasper.lund7276f142008-07-30 08:49:36 +00001706#ifdef DEBUG
1707bool OldSpaceFreeList::Contains(FreeListNode* node) {
1708 for (int i = 0; i < kFreeListsLength; i++) {
1709 Address cur_addr = free_[i].head_node_;
1710 while (cur_addr != NULL) {
1711 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1712 if (cur_node == node) return true;
1713 cur_addr = cur_node->next();
1714 }
1715 }
1716 return false;
1717}
1718#endif
1719
1720
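// FixedSizeFreeList is much simpler than OldSpaceFreeList: every block has the
// same size (object_size_), so a single LIFO list of block addresses suffices
// and allocation never needs to search or split.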
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001721FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
1722 : owner_(owner), object_size_(object_size) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001723 Reset();
1724}
1725
1726
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001727void FixedSizeFreeList::Reset() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001728 available_ = 0;
1729 head_ = NULL;
1730}
1731
1732
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001733void FixedSizeFreeList::Free(Address start) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001734#ifdef DEBUG
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001735 for (int i = 0; i < object_size_; i += kPointerSize) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001736 Memory::Address_at(start + i) = kZapValue;
1737 }
1738#endif
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001739 // We only use the freelists with mark-sweep.
1740 ASSERT(!MarkCompactCollector::IsCompacting());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001741 FreeListNode* node = FreeListNode::FromAddress(start);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001742 node->set_size(object_size_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001743 node->set_next(head_);
1744 head_ = node->address();
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001745 available_ += object_size_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001746}
1747
1748
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001749Object* FixedSizeFreeList::Allocate() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001750 if (head_ == NULL) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001751 return Failure::RetryAfterGC(object_size_, owner_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001752 }
1753
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001754 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001755 FreeListNode* node = FreeListNode::FromAddress(head_);
1756 head_ = node->next();
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001757 available_ -= object_size_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001758 return node;
1759}
1760
1761
1762// -----------------------------------------------------------------------------
1763// OldSpace implementation
1764
1765void OldSpace::PrepareForMarkCompact(bool will_compact) {
1766 if (will_compact) {
1767 // Reset relocation info. During a compacting collection, everything in
1768 // the space is considered 'available' and we will rediscover live data
1769 // and waste during the collection.
1770 MCResetRelocationInfo();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001771 ASSERT(Available() == Capacity());
1772 } else {
1773 // During a non-compacting collection, everything below the linear
1774 // allocation pointer is considered allocated (everything above is
1775 // available) and we will rediscover available and wasted bytes during
1776 // the collection.
1777 accounting_stats_.AllocateBytes(free_list_.available());
1778 accounting_stats_.FillWastedBytes(Waste());
1779 }
1780
kasper.lund7276f142008-07-30 08:49:36 +00001781 // Clear the free list before a full GC---it will be rebuilt afterward.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001782 free_list_.Reset();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001783}
1784
1785
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001786void OldSpace::MCCommitRelocationInfo() {
1787 // Update fast allocation info.
1788 allocation_info_.top = mc_forwarding_info_.top;
1789 allocation_info_.limit = mc_forwarding_info_.limit;
kasper.lund7276f142008-07-30 08:49:36 +00001790 ASSERT(allocation_info_.VerifyPagedAllocation());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001791
1792 // The space is compacted and we haven't yet built free lists or
1793 // wasted any space.
1794 ASSERT(Waste() == 0);
1795 ASSERT(AvailableFree() == 0);
1796
1797 // Build the free list for the space.
1798 int computed_size = 0;
1799 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
1800 while (it.has_next()) {
1801 Page* p = it.next();
1802 // Space below the relocation pointer is allocated.
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001803 computed_size +=
1804 static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001805 if (it.has_next()) {
1806 // Free the space at the top of the page. We cannot use
1807 // p->mc_relocation_top after the call to Free (because Free will clear
1808 // remembered set bits).
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001809 int extra_size =
1810 static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001811 if (extra_size > 0) {
1812 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
1813 // The bytes we have just "freed" to add to the free list were
1814 // already accounted as available.
1815 accounting_stats_.WasteBytes(wasted_bytes);
1816 }
1817 }
1818 }
1819
1820 // Make sure the computed size - based on the used portion of the pages in
1821 // use - matches the size obtained while computing forwarding addresses.
1822 ASSERT(computed_size == Size());
1823}
1824
1825
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001826bool NewSpace::ReserveSpace(int bytes) {
1827 // We can't reliably unpack a partial snapshot that needs more new space
1828 // than the minimum NewSpace size.
1829 ASSERT(bytes <= InitialCapacity());
1830 Address limit = allocation_info_.limit;
1831 Address top = allocation_info_.top;
1832 return limit - top >= bytes;
1833}
1834
1835
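// Ensures that 'bytes' can subsequently be allocated without failing: if the
// current linear allocation area is too small, its remainder is put on the
// free list, enough follow-on pages are reserved (expanding the space if
// allowed), and allocation moves to the next page.  Returns false if the old
// generation limit has been reached or the space cannot be expanded.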
1836bool PagedSpace::ReserveSpace(int bytes) {
1837 Address limit = allocation_info_.limit;
1838 Address top = allocation_info_.top;
1839 if (limit - top >= bytes) return true;
1840
1841 // There wasn't enough space in the current page. Let's put the rest
1842 // of the page on the free list and start a fresh page.
1843 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
1844
1845 Page* reserved_page = TopPageOf(allocation_info_);
1846 int bytes_left_to_reserve = bytes;
1847 while (bytes_left_to_reserve > 0) {
1848 if (!reserved_page->next_page()->is_valid()) {
1849 if (Heap::OldGenerationAllocationLimitReached()) return false;
1850 Expand(reserved_page);
1851 }
1852 bytes_left_to_reserve -= Page::kPageSize;
1853 reserved_page = reserved_page->next_page();
1854 if (!reserved_page->is_valid()) return false;
1855 }
1856 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
1857 SetAllocationInfo(&allocation_info_,
1858 TopPageOf(allocation_info_)->next_page());
1859 return true;
1860}
1861
1862
1863// You have to call this last, since the implementation from PagedSpace
1864// doesn't know that memory was 'promised' to large object space.
1865bool LargeObjectSpace::ReserveSpace(int bytes) {
1866 return Heap::OldGenerationSpaceAvailable() >= bytes;
1867}
1868
1869
kasper.lund7276f142008-07-30 08:49:36 +00001870// Slow case for normal allocation. Try in order: (1) allocate in the next
1871// page in the space, (2) allocate off the space's free list, (3) expand the
1872// space, (4) fail.
1873HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
1874 // Linear allocation in this space has failed. If there is another page
1875 // in the space, move to that page and allocate there. This allocation
1876 // should succeed (size_in_bytes should not be greater than a page's
1877 // object area size).
1878 Page* current_page = TopPageOf(allocation_info_);
1879 if (current_page->next_page()->is_valid()) {
1880 return AllocateInNextPage(current_page, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001881 }
kasper.lund7276f142008-07-30 08:49:36 +00001882
ager@chromium.org3811b432009-10-28 14:53:37 +00001883 // There is no next page in this space. Try free list allocation unless that
1884 // is currently forbidden.
1885 if (!Heap::linear_allocation()) {
1886 int wasted_bytes;
1887 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
1888 accounting_stats_.WasteBytes(wasted_bytes);
1889 if (!result->IsFailure()) {
1890 accounting_stats_.AllocateBytes(size_in_bytes);
1891 return HeapObject::cast(result);
1892 }
kasper.lund7276f142008-07-30 08:49:36 +00001893 }
1894
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001895 // Free list allocation failed and there is no next page. Fail if we have
1896 // hit the old generation size limit that should cause a garbage
1897 // collection.
1898 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
1899 return NULL;
1900 }
1901
1902 // Try to expand the space and allocate in the new next page.
kasper.lund7276f142008-07-30 08:49:36 +00001903 ASSERT(!current_page->next_page()->is_valid());
1904 if (Expand(current_page)) {
1905 return AllocateInNextPage(current_page, size_in_bytes);
1906 }
1907
1908 // Finally, fail.
1909 return NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001910}
1911
1912
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001913void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001914 int free_size =
1915 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
kasper.lund7276f142008-07-30 08:49:36 +00001916 if (free_size > 0) {
1917 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
1918 accounting_stats_.WasteBytes(wasted_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001919 }
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001920}
1921
1922
1923void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
1924 int free_size =
1925 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
1926 // In the fixed space free list all the free list items have the right size.
1927 // We use up the rest of the page while preserving this invariant.
1928 while (free_size >= object_size_in_bytes_) {
1929 free_list_.Free(allocation_info_.top);
1930 allocation_info_.top += object_size_in_bytes_;
1931 free_size -= object_size_in_bytes_;
1932 accounting_stats_.WasteBytes(object_size_in_bytes_);
1933 }
1934}
1935
1936
1937// Add the block at the top of the current page to the space's free list, set
1938// the allocation info to the next page (there is assumed to be one), and
1939// allocate linearly there.
1940HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
1941 int size_in_bytes) {
1942 ASSERT(current_page->next_page()->is_valid());
1943 PutRestOfCurrentPageOnFreeList(current_page);
kasper.lund7276f142008-07-30 08:49:36 +00001944 SetAllocationInfo(&allocation_info_, current_page->next_page());
1945 return AllocateLinearly(&allocation_info_, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001946}
1947
1948
1949#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001950struct CommentStatistic {
1951 const char* comment;
1952 int size;
1953 int count;
1954 void Clear() {
1955 comment = NULL;
1956 size = 0;
1957 count = 0;
1958 }
1959};
1960
1961
1962// Must be small, since a linear scan over the table is used for lookup.
1963const int kMaxComments = 64;
1964static CommentStatistic comments_statistics[kMaxComments+1];
1965
1966
1967void PagedSpace::ReportCodeStatistics() {
1968 ReportCodeKindStatistics();
1969 PrintF("Code comment statistics (\" [ comment-txt : size/ "
1970 "count (average)\"):\n");
1971 for (int i = 0; i <= kMaxComments; i++) {
1972 const CommentStatistic& cs = comments_statistics[i];
1973 if (cs.size > 0) {
1974 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
1975 cs.size/cs.count);
1976 }
1977 }
1978 PrintF("\n");
1979}
1980
1981
1982void PagedSpace::ResetCodeStatistics() {
1983 ClearCodeKindStatistics();
1984 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
1985 comments_statistics[kMaxComments].comment = "Unknown";
1986 comments_statistics[kMaxComments].size = 0;
1987 comments_statistics[kMaxComments].count = 0;
1988}
1989
1990
1991// Adds a comment to the 'comments_statistics' table. Performance is OK as long
1992// as 'kMaxComments' is small.
1993static void EnterComment(const char* comment, int delta) {
1994 // Do not count empty comments
1995 if (delta <= 0) return;
1996 CommentStatistic* cs = &comments_statistics[kMaxComments];
1997 // Search for a free or matching entry in 'comments_statistics': 'cs'
1998 // points to result.
1999 for (int i = 0; i < kMaxComments; i++) {
2000 if (comments_statistics[i].comment == NULL) {
2001 cs = &comments_statistics[i];
2002 cs->comment = comment;
2003 break;
2004 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2005 cs = &comments_statistics[i];
2006 break;
2007 }
2008 }
2009 // Update entry for 'comment'
2010 cs->size += delta;
2011 cs->count += 1;
2012}
2013
2014
2015// Call for each nested comment start (start marked with '[ xxx', end marked
2016// with ']'). RelocIterator 'it' must point to a comment reloc info.
2017static void CollectCommentStatistics(RelocIterator* it) {
2018 ASSERT(!it->done());
ager@chromium.org236ad962008-09-25 09:45:57 +00002019 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002020 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2021 if (tmp[0] != '[') {
2022 // Not a nested comment; skip
2023 return;
2024 }
2025
2026 // Search for end of nested comment or a new nested comment
2027 const char* const comment_txt =
2028 reinterpret_cast<const char*>(it->rinfo()->data());
2029 const byte* prev_pc = it->rinfo()->pc();
2030 int flat_delta = 0;
2031 it->next();
2032 while (true) {
2033 // All nested comments must be terminated properly, so this loop will exit
2034 // via the break below before the iterator runs out.
2035 ASSERT(!it->done());
ager@chromium.org236ad962008-09-25 09:45:57 +00002036 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002037 const char* const txt =
2038 reinterpret_cast<const char*>(it->rinfo()->data());
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002039 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002040 if (txt[0] == ']') break; // End of nested comment
2041 // A new comment
2042 CollectCommentStatistics(it);
2043 // Skip code that was covered with previous comment
2044 prev_pc = it->rinfo()->pc();
2045 }
2046 it->next();
2047 }
2048 EnterComment(comment_txt, flat_delta);
2049}
2050
2051
2052// Collects code size statistics:
2053// - by code kind
2054// - by code comment
2055void PagedSpace::CollectCodeStatistics() {
2056 HeapObjectIterator obj_it(this);
2057 while (obj_it.has_next()) {
2058 HeapObject* obj = obj_it.next();
2059 if (obj->IsCode()) {
2060 Code* code = Code::cast(obj);
2061 code_kind_statistics[code->kind()] += code->Size();
2062 RelocIterator it(code);
2063 int delta = 0;
2064 const byte* prev_pc = code->instruction_start();
2065 while (!it.done()) {
ager@chromium.org236ad962008-09-25 09:45:57 +00002066 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002067 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002068 CollectCommentStatistics(&it);
2069 prev_pc = it.rinfo()->pc();
2070 }
2071 it.next();
2072 }
2073
2074 ASSERT(code->instruction_start() <= prev_pc &&
2075 prev_pc <= code->relocation_start());
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002076 delta += static_cast<int>(code->relocation_start() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002077 EnterComment("NoComment", delta);
2078 }
2079 }
2080}
2081
2082
2083void OldSpace::ReportStatistics() {
2084 int pct = Available() * 100 / Capacity();
2085 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2086 Capacity(), Waste(), Available(), pct);
2087
2088 // Report remembered set statistics.
2089 int rset_marked_pointers = 0;
2090 int rset_marked_arrays = 0;
2091 int rset_marked_array_elements = 0;
2092 int cross_gen_pointers = 0;
2093 int cross_gen_array_elements = 0;
2094
2095 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2096 while (page_it.has_next()) {
2097 Page* p = page_it.next();
2098
2099 for (Address rset_addr = p->RSetStart();
2100 rset_addr < p->RSetEnd();
2101 rset_addr += kIntSize) {
2102 int rset = Memory::int_at(rset_addr);
2103 if (rset != 0) {
2104 // Bits were set
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002105 int intoff =
2106 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002107 int bitoff = 0;
2108 for (; bitoff < kBitsPerInt; ++bitoff) {
2109 if ((rset & (1 << bitoff)) != 0) {
2110 int bitpos = intoff*kBitsPerByte + bitoff;
2111 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2112 Object** obj = reinterpret_cast<Object**>(slot);
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00002113 if (*obj == Heap::raw_unchecked_fixed_array_map()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002114 rset_marked_arrays++;
2115 FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
2116
2117 rset_marked_array_elements += fa->length();
2118 // Manually inline FixedArray::IterateBody
2119 Address elm_start = slot + FixedArray::kHeaderSize;
2120 Address elm_stop = elm_start + fa->length() * kPointerSize;
2121 for (Address elm_addr = elm_start;
2122 elm_addr < elm_stop; elm_addr += kPointerSize) {
2123 // Filter non-heap-object pointers
2124 Object** elm_p = reinterpret_cast<Object**>(elm_addr);
2125 if (Heap::InNewSpace(*elm_p))
2126 cross_gen_array_elements++;
2127 }
2128 } else {
2129 rset_marked_pointers++;
2130 if (Heap::InNewSpace(*obj))
2131 cross_gen_pointers++;
2132 }
2133 }
2134 }
2135 }
2136 }
2137 }
2138
2139 pct = rset_marked_pointers == 0 ?
2140 0 : cross_gen_pointers * 100 / rset_marked_pointers;
2141 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
2142 rset_marked_pointers, cross_gen_pointers, pct);
2143 PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
2144 PrintF(" elements %d, ", rset_marked_array_elements);
2145 pct = rset_marked_array_elements == 0 ? 0
2146 : cross_gen_array_elements * 100 / rset_marked_array_elements;
2147 PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
2148 PrintF(" total rset-marked bits %d\n",
2149 (rset_marked_pointers + rset_marked_arrays));
2150 pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
2151 : (cross_gen_pointers + cross_gen_array_elements) * 100 /
2152 (rset_marked_pointers + rset_marked_array_elements);
2153 PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
2154 (rset_marked_pointers + rset_marked_array_elements),
2155 (cross_gen_pointers + cross_gen_array_elements),
2156 pct);
2157
2158 ClearHistograms();
2159 HeapObjectIterator obj_it(this);
2160 while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
2161 ReportHistogram(true);
2162}
2163
2164
2165// Dump the range of remembered set words between [start, end) corresponding
2166// to the pointers starting at object_p. The allocation_top is an object
2167// pointer which should not be read past. This is important for large object
2168// pages, where some bits in the remembered set range do not correspond to
2169// allocated addresses.
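// Output legend: '.' = clear bit, 'X' = set bit whose slot points to new
// space, 'o' = set bit whose slot points to old space, '|' = the allocation
// top, '#' = slots past the allocation top (not dereferenced).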
2170static void PrintRSetRange(Address start, Address end, Object** object_p,
2171 Address allocation_top) {
2172 Address rset_address = start;
2173
2174 // If the range starts on an odd-numbered word (e.g., for large object extra
2175 // remembered set ranges), print some spaces.
ager@chromium.org9085a012009-05-11 19:22:57 +00002176 if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002177 PrintF(" ");
2178 }
2179
2180 // Loop over all the words in the range.
2181 while (rset_address < end) {
2182 uint32_t rset_word = Memory::uint32_at(rset_address);
2183 int bit_position = 0;
2184
2185 // Loop over all the bits in the word.
2186 while (bit_position < kBitsPerInt) {
2187 if (object_p == reinterpret_cast<Object**>(allocation_top)) {
2188 // Print a bar at the allocation pointer.
2189 PrintF("|");
2190 } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
2191 // Do not dereference object_p past the allocation pointer.
2192 PrintF("#");
2193 } else if ((rset_word & (1 << bit_position)) == 0) {
2194 // Print a dot for zero bits.
2195 PrintF(".");
2196 } else if (Heap::InNewSpace(*object_p)) {
2197 // Print an X for one bits for pointers to new space.
2198 PrintF("X");
2199 } else {
2200 // Print a circle for one bits for pointers to old space.
2201 PrintF("o");
2202 }
2203
2204 // Print a space after every 8th bit except the last.
2205 if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
2206 PrintF(" ");
2207 }
2208
2209 // Advance to next bit.
2210 bit_position++;
2211 object_p++;
2212 }
2213
2214 // Print a newline after every odd numbered word, otherwise a space.
ager@chromium.org9085a012009-05-11 19:22:57 +00002215 if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002216 PrintF("\n");
2217 } else {
2218 PrintF(" ");
2219 }
2220
2221 // Advance to next remembered set word.
2222 rset_address += kIntSize;
2223 }
2224}
2225
2226
2227void PagedSpace::DoPrintRSet(const char* space_name) {
2228 PageIterator it(this, PageIterator::PAGES_IN_USE);
2229 while (it.has_next()) {
2230 Page* p = it.next();
2231 PrintF("%s page %p:\n", space_name, reinterpret_cast<void*>(p));
2232 PrintRSetRange(p->RSetStart(), p->RSetEnd(),
2233 reinterpret_cast<Object**>(p->ObjectAreaStart()),
2234 p->AllocationTop());
2235 PrintF("\n");
2236 }
2237}
2238
2239
2240void OldSpace::PrintRSet() { DoPrintRSet("old"); }
2241#endif
2242
2243// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002244// FixedSpace implementation
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002245
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002246void FixedSpace::PrepareForMarkCompact(bool will_compact) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002247 if (will_compact) {
2248 // Reset relocation info.
2249 MCResetRelocationInfo();
2250
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002251 // During a compacting collection, everything in the space is considered
2252 // 'available' (set by the call to MCResetRelocationInfo) and we will
2253 // rediscover live and wasted bytes during the collection.
2254 ASSERT(Available() == Capacity());
2255 } else {
2256 // During a non-compacting collection, everything below the linear
2257 // allocation pointer except wasted top-of-page blocks is considered
2258 // allocated and we will rediscover available bytes during the
2259 // collection.
2260 accounting_stats_.AllocateBytes(free_list_.available());
2261 }
2262
kasper.lund7276f142008-07-30 08:49:36 +00002263 // Clear the free list before a full GC---it will be rebuilt afterward.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002264 free_list_.Reset();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002265}
2266
2267
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002268void FixedSpace::MCCommitRelocationInfo() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002269 // Update fast allocation info.
2270 allocation_info_.top = mc_forwarding_info_.top;
2271 allocation_info_.limit = mc_forwarding_info_.limit;
kasper.lund7276f142008-07-30 08:49:36 +00002272 ASSERT(allocation_info_.VerifyPagedAllocation());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002273
2274 // The space is compacted and we haven't yet wasted any space.
2275 ASSERT(Waste() == 0);
2276
2277 // Update allocation_top of each page in use and compute waste.
2278 int computed_size = 0;
2279 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2280 while (it.has_next()) {
2281 Page* page = it.next();
2282 Address page_top = page->AllocationTop();
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002283 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002284 if (it.has_next()) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002285 accounting_stats_.WasteBytes(
2286 static_cast<int>(page->ObjectAreaEnd() - page_top));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002287 }
2288 }
2289
2290 // Make sure the computed size - based on the used portion of the
2291 // pages in use - matches the size we adjust during allocation.
2292 ASSERT(computed_size == Size());
2293}
2294
2295
kasper.lund7276f142008-07-30 08:49:36 +00002296// Slow case for normal allocation. Try in order: (1) allocate in the next
2297// page in the space, (2) allocate off the space's free list, (3) expand the
2298// space, (4) fail.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002299HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2300 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00002301 // Linear allocation in this space has failed. If there is another page
2302 // in the space, move to that page and allocate there. This allocation
2303 // should succeed.
2304 Page* current_page = TopPageOf(allocation_info_);
2305 if (current_page->next_page()->is_valid()) {
2306 return AllocateInNextPage(current_page, size_in_bytes);
2307 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002308
ager@chromium.org3811b432009-10-28 14:53:37 +00002309 // There is no next page in this space. Try free list allocation unless
2310 // that is currently forbidden. The fixed space free list implicitly assumes
2311 // that all free blocks are of the fixed size.
2312 if (!Heap::linear_allocation()) {
kasper.lund7276f142008-07-30 08:49:36 +00002313 Object* result = free_list_.Allocate();
2314 if (!result->IsFailure()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002315 accounting_stats_.AllocateBytes(size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00002316 return HeapObject::cast(result);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002317 }
2318 }
kasper.lund7276f142008-07-30 08:49:36 +00002319
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002320 // Free list allocation failed and there is no next page. Fail if we have
2321 // hit the old generation size limit that should cause a garbage
2322 // collection.
2323 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2324 return NULL;
2325 }
2326
2327 // Try to expand the space and allocate in the new next page.
kasper.lund7276f142008-07-30 08:49:36 +00002328 ASSERT(!current_page->next_page()->is_valid());
2329 if (Expand(current_page)) {
2330 return AllocateInNextPage(current_page, size_in_bytes);
2331 }
2332
2333 // Finally, fail.
2334 return NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002335}
2336
2337
kasper.lund7276f142008-07-30 08:49:36 +00002338// Move to the next page (there is assumed to be one) and allocate there.
2339// The top of page block is always wasted, because it is too small to hold a
2340// map.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002341HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2342 int size_in_bytes) {
kasper.lund7276f142008-07-30 08:49:36 +00002343 ASSERT(current_page->next_page()->is_valid());
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002344 ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
2345 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2346 accounting_stats_.WasteBytes(page_extra_);
kasper.lund7276f142008-07-30 08:49:36 +00002347 SetAllocationInfo(&allocation_info_, current_page->next_page());
2348 return AllocateLinearly(&allocation_info_, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002349}
2350
2351
2352#ifdef DEBUG
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002353void FixedSpace::ReportStatistics() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002354 int pct = Available() * 100 / Capacity();
2355 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2356 Capacity(), Waste(), Available(), pct);
2357
2358 // Report remembered set statistics.
2359 int rset_marked_pointers = 0;
2360 int cross_gen_pointers = 0;
2361
2362 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2363 while (page_it.has_next()) {
2364 Page* p = page_it.next();
2365
2366 for (Address rset_addr = p->RSetStart();
2367 rset_addr < p->RSetEnd();
2368 rset_addr += kIntSize) {
2369 int rset = Memory::int_at(rset_addr);
2370 if (rset != 0) {
2371 // Bits were set
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002372 int intoff =
2373 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002374 int bitoff = 0;
2375 for (; bitoff < kBitsPerInt; ++bitoff) {
2376 if ((rset & (1 << bitoff)) != 0) {
2377 int bitpos = intoff*kBitsPerByte + bitoff;
2378 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2379 Object** obj = reinterpret_cast<Object**>(slot);
2380 rset_marked_pointers++;
2381 if (Heap::InNewSpace(*obj))
2382 cross_gen_pointers++;
2383 }
2384 }
2385 }
2386 }
2387 }
2388
2389 pct = rset_marked_pointers == 0 ?
2390 0 : cross_gen_pointers * 100 / rset_marked_pointers;
2391 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
2392 rset_marked_pointers, cross_gen_pointers, pct);
2393
2394 ClearHistograms();
2395 HeapObjectIterator obj_it(this);
2396 while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
2397 ReportHistogram(false);
2398}
2399
2400
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002401void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
2402#endif
2403
2404
2405// -----------------------------------------------------------------------------
2406// MapSpace implementation
2407
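// Besides the generic FixedSpace preparation, a compacting collection also
// records the address of every page in page_addresses_, indexed by
// mc_page_index, so that map pages can later be looked up by index.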
2408void MapSpace::PrepareForMarkCompact(bool will_compact) {
2409 // Call prepare of the super class.
2410 FixedSpace::PrepareForMarkCompact(will_compact);
2411
2412 if (will_compact) {
2413 // Initialize map index entry.
2414 int page_count = 0;
2415 PageIterator it(this, PageIterator::ALL_PAGES);
2416 while (it.has_next()) {
2417 ASSERT_MAP_PAGE_INDEX(page_count);
2418
2419 Page* p = it.next();
2420 ASSERT(p->mc_page_index == page_count);
2421
2422 page_addresses_[page_count++] = p->address();
2423 }
2424 }
2425}
2426
2427
2428#ifdef DEBUG
2429void MapSpace::VerifyObject(HeapObject* object) {
2430 // The object should be a map or a free-list node.
2431 ASSERT(object->IsMap() || object->IsByteArray());
2432}
2433#endif
2434
2435
2436// -----------------------------------------------------------------------------
2437// CellSpace (global property cell space) implementation
2438
2439#ifdef DEBUG
2440void CellSpace::VerifyObject(HeapObject* object) {
2441 // The object should be a global object property cell or a free-list node.
2442 ASSERT(object->IsJSGlobalPropertyCell() ||
2443 object->map() == Heap::two_pointer_filler_map());
2444}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002445#endif
2446
2447
2448// -----------------------------------------------------------------------------
2449// LargeObjectIterator
2450
2451LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2452 current_ = space->first_chunk_;
2453 size_func_ = NULL;
2454}
2455
2456
2457LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2458 HeapObjectCallback size_func) {
2459 current_ = space->first_chunk_;
2460 size_func_ = size_func;
2461}
2462
2463
2464HeapObject* LargeObjectIterator::next() {
2465 ASSERT(has_next());
2466 HeapObject* object = current_->GetObject();
2467 current_ = current_->next();
2468 return object;
2469}
2470
2471
2472// -----------------------------------------------------------------------------
2473// LargeObjectChunk
2474
2475LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
kasper.lund7276f142008-07-30 08:49:36 +00002476 size_t* chunk_size,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002477 Executability executable) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002478 size_t requested = ChunkSizeFor(size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00002479 void* mem = MemoryAllocator::AllocateRawMemory(requested,
2480 chunk_size,
2481 executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002482 if (mem == NULL) return NULL;
2483 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
2484 if (*chunk_size < requested) {
2485 MemoryAllocator::FreeRawMemory(mem, *chunk_size);
2486 LOG(DeleteEvent("LargeObjectChunk", mem));
2487 return NULL;
2488 }
2489 return reinterpret_cast<LargeObjectChunk*>(mem);
2490}
2491
2492
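// Computes how much raw memory to request for a large object of
// 'size_in_bytes': the object area itself, the page header
// (Page::kObjectStartOffset), and extra slack so that the object still fits
// after the chunk start is rounded up to a page boundary when the OS
// allocation alignment is smaller than Page::kPageSize.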
2493int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002494 int os_alignment = static_cast<int>(OS::AllocateAlignment());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002495 if (os_alignment < Page::kPageSize)
2496 size_in_bytes += (Page::kPageSize - os_alignment);
2497 return size_in_bytes + Page::kObjectStartOffset;
2498}
2499
2500// -----------------------------------------------------------------------------
2501// LargeObjectSpace
2502
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002503LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
2504 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
kasper.lund7276f142008-07-30 08:49:36 +00002505 first_chunk_(NULL),
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002506 size_(0),
2507 page_count_(0) {}
2508
2509
2510bool LargeObjectSpace::Setup() {
2511 first_chunk_ = NULL;
2512 size_ = 0;
2513 page_count_ = 0;
2514 return true;
2515}
2516
2517
2518void LargeObjectSpace::TearDown() {
2519 while (first_chunk_ != NULL) {
2520 LargeObjectChunk* chunk = first_chunk_;
2521 first_chunk_ = first_chunk_->next();
2522 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
2523 MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
2524 }
2525
2526 size_ = 0;
2527 page_count_ = 0;
2528}
2529
2530
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00002531#ifdef ENABLE_HEAP_PROTECTION
2532
2533void LargeObjectSpace::Protect() {
2534 LargeObjectChunk* chunk = first_chunk_;
2535 while (chunk != NULL) {
2536 MemoryAllocator::Protect(chunk->address(), chunk->size());
2537 chunk = chunk->next();
2538 }
2539}
2540
2541
2542void LargeObjectSpace::Unprotect() {
2543 LargeObjectChunk* chunk = first_chunk_;
2544 while (chunk != NULL) {
2545 bool is_code = chunk->GetObject()->IsCode();
2546 MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
2547 is_code ? EXECUTABLE : NOT_EXECUTABLE);
2548 chunk = chunk->next();
2549 }
2550}
2551
2552#endif
2553
2554
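// Allocates a dedicated chunk holding a single large object.  'requested_size'
// may exceed 'object_size' to leave room for an extra remembered set behind
// fixed arrays (see AllocateRawFixedArray); that extra tail is cleared below.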
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002555Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002556 int object_size,
2557 Executability executable) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002558 ASSERT(0 < object_size && object_size <= requested_size);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002559
2560 // Check if we want to force a GC before growing the old space further.
2561 // If so, fail the allocation.
2562 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2563 return Failure::RetryAfterGC(requested_size, identity());
2564 }
2565
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002566 size_t chunk_size;
2567 LargeObjectChunk* chunk =
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002568 LargeObjectChunk::New(requested_size, &chunk_size, executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002569 if (chunk == NULL) {
kasper.lund7276f142008-07-30 08:49:36 +00002570 return Failure::RetryAfterGC(requested_size, identity());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002571 }
2572
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002573 size_ += static_cast<int>(chunk_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002574 page_count_++;
2575 chunk->set_next(first_chunk_);
2576 chunk->set_size(chunk_size);
2577 first_chunk_ = chunk;
2578
2579 // Set the object address and size in the page header and clear its
2580 // remembered set.
2581 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2582 Address object_address = page->ObjectAreaStart();
2583 // Clear the low order bit of the second word in the page to flag it as a
2584 // large object page. If the chunk_size happened to be written there, its
2585 // low order bit should already be clear.
2586 ASSERT((chunk_size & 0x1) == 0);
2587 page->is_normal_page &= ~0x1;
2588 page->ClearRSet();
2589 int extra_bytes = requested_size - object_size;
2590 if (extra_bytes > 0) {
2591 // The extra memory for the remembered set should be cleared.
2592 memset(object_address + object_size, 0, extra_bytes);
2593 }
2594
2595 return HeapObject::FromAddress(object_address);
2596}
2597
2598
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002599Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002600 ASSERT(0 < size_in_bytes);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002601 return AllocateRawInternal(size_in_bytes,
2602 size_in_bytes,
2603 EXECUTABLE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002604}
2605
2606
2607Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002608 ASSERT(0 < size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002609 int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002610 return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
2611 size_in_bytes,
2612 NOT_EXECUTABLE);
2613}
2614
2615
2616Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2617 ASSERT(0 < size_in_bytes);
2618 return AllocateRawInternal(size_in_bytes,
2619 size_in_bytes,
2620 NOT_EXECUTABLE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002621}
2622
2623
2624// GC support
2625Object* LargeObjectSpace::FindObject(Address a) {
2626 for (LargeObjectChunk* chunk = first_chunk_;
2627 chunk != NULL;
2628 chunk = chunk->next()) {
2629 Address chunk_address = chunk->address();
2630 if (chunk_address <= a && a < chunk_address + chunk->size()) {
2631 return chunk->GetObject();
2632 }
2633 }
2634 return Failure::Exception();
2635}
2636
2637
2638void LargeObjectSpace::ClearRSet() {
2639 ASSERT(Page::is_rset_in_use());
2640
2641 LargeObjectIterator it(this);
2642 while (it.has_next()) {
2643 HeapObject* object = it.next();
2644 // We only have code, sequential strings, or fixed arrays in large
2645 // object space, and only fixed arrays need remembered set support.
2646 if (object->IsFixedArray()) {
2647 // Clear the normal remembered set region of the page.
2648 Page* page = Page::FromAddress(object->address());
2649 page->ClearRSet();
2650
2651 // Clear the extra remembered set.
2652 int size = object->Size();
2653 int extra_rset_bytes = ExtraRSetBytesFor(size);
2654 memset(object->address() + size, 0, extra_rset_bytes);
2655 }
2656 }
2657}
2658
2659
2660void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
2661 ASSERT(Page::is_rset_in_use());
2662
kasperl@chromium.org71affb52009-05-26 05:44:31 +00002663 static void* lo_rset_histogram = StatsTable::CreateHistogram(
2664 "V8.RSetLO",
2665 0,
2666 // Keeping this histogram's buckets the same as the paged space histogram.
2667 Page::kObjectAreaSize / kPointerSize,
2668 30);
2669
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002670 LargeObjectIterator it(this);
2671 while (it.has_next()) {
2672 // We only have code, sequential strings, or fixed arrays in large
2673 // object space, and only fixed arrays can possibly contain pointers to
2674 // the young generation.
2675 HeapObject* object = it.next();
2676 if (object->IsFixedArray()) {
2677 // Iterate the normal page remembered set range.
2678 Page* page = Page::FromAddress(object->address());
2679 Address object_end = object->address() + object->Size();
kasperl@chromium.org71affb52009-05-26 05:44:31 +00002680 int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
2681 Min(page->ObjectAreaEnd(), object_end),
2682 page->RSetStart(),
2683 copy_object_func);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002684
2685 // Iterate the extra array elements.
2686 if (object_end > page->ObjectAreaEnd()) {
kasperl@chromium.org71affb52009-05-26 05:44:31 +00002687 count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
2688 object_end, copy_object_func);
2689 }
2690 if (lo_rset_histogram != NULL) {
2691 StatsTable::AddHistogramSample(lo_rset_histogram, count);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002692 }
2693 }
2694 }
2695}
2696
2697
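// Sweeps the chunk list after a mark phase: marked objects survive (their mark
// bit is cleared here), while chunks holding unmarked objects are unlinked and
// their memory is returned via MemoryAllocator::FreeRawMemory.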
2698void LargeObjectSpace::FreeUnmarkedObjects() {
2699 LargeObjectChunk* previous = NULL;
2700 LargeObjectChunk* current = first_chunk_;
2701 while (current != NULL) {
2702 HeapObject* object = current->GetObject();
kasper.lund7276f142008-07-30 08:49:36 +00002703 if (object->IsMarked()) {
2704 object->ClearMark();
2705 MarkCompactCollector::tracer()->decrement_marked_count();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002706 previous = current;
2707 current = current->next();
2708 } else {
2709 Address chunk_address = current->address();
2710 size_t chunk_size = current->size();
2711
2712 // Cut the chunk out from the chunk list.
2713 current = current->next();
2714 if (previous == NULL) {
2715 first_chunk_ = current;
2716 } else {
2717 previous->set_next(current);
2718 }
2719
2720 // Free the chunk.
2721 if (object->IsCode()) {
2722 LOG(CodeDeleteEvent(object->address()));
2723 }
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002724 size_ -= static_cast<int>(chunk_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002725 page_count_--;
2726 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
2727 LOG(DeleteEvent("LargeObjectChunk", chunk_address));
2728 }
2729 }
2730}
2731
2732
2733bool LargeObjectSpace::Contains(HeapObject* object) {
2734 Address address = object->address();
2735 Page* page = Page::FromAddress(address);
2736
2737 SLOW_ASSERT(!page->IsLargeObjectPage()
2738 || !FindObject(address)->IsFailure());
2739
2740 return page->IsLargeObjectPage();
2741}
2742
2743
2744#ifdef DEBUG
2745// We do not assume that the large object iterator works, because it depends
2746// on the invariants we are checking during verification.
2747void LargeObjectSpace::Verify() {
2748 for (LargeObjectChunk* chunk = first_chunk_;
2749 chunk != NULL;
2750 chunk = chunk->next()) {
2751 // Each chunk contains an object that starts at the large object page's
2752 // object area start.
2753 HeapObject* object = chunk->GetObject();
2754 Page* page = Page::FromAddress(object->address());
2755 ASSERT(object->address() == page->ObjectAreaStart());
2756
2757 // The first word should be a map, and we expect all map pointers to be
2758 // in map space.
2759 Map* map = object->map();
2760 ASSERT(map->IsMap());
2761 ASSERT(Heap::map_space()->Contains(map));
2762
ager@chromium.orga1645e22009-09-09 19:27:10 +00002763 // We have only code, sequential strings, external strings
2764 // (sequential strings that have been morphed into external
2765 // strings), fixed arrays, and byte arrays in large object space.
2766 ASSERT(object->IsCode() || object->IsSeqString() ||
2767 object->IsExternalString() || object->IsFixedArray() ||
2768 object->IsByteArray());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002769
2770 // The object itself should look OK.
mads.s.ager@gmail.com9a4089a2008-09-01 08:55:01 +00002771 object->Verify();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002772
2773 // Byte arrays and strings don't have interior pointers.
2774 if (object->IsCode()) {
2775 VerifyPointersVisitor code_visitor;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002776 object->IterateBody(map->instance_type(),
2777 object->Size(),
2778 &code_visitor);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002779 } else if (object->IsFixedArray()) {
2780 // We loop over fixed arrays ourselves, rather then using the visitor,
2781 // because the visitor doesn't support the start/offset iteration
2782 // needed for IsRSetSet.
2783 FixedArray* array = FixedArray::cast(object);
2784 for (int j = 0; j < array->length(); j++) {
2785 Object* element = array->get(j);
2786 if (element->IsHeapObject()) {
2787 HeapObject* element_object = HeapObject::cast(element);
2788 ASSERT(Heap::Contains(element_object));
2789 ASSERT(element_object->map()->IsMap());
2790 if (Heap::InNewSpace(element_object)) {
2791 ASSERT(Page::IsRSetSet(object->address(),
2792 FixedArray::kHeaderSize + j * kPointerSize));
2793 }
2794 }
2795 }
2796 }
2797 }
2798}
2799
2800
2801void LargeObjectSpace::Print() {
2802 LargeObjectIterator it(this);
2803 while (it.has_next()) {
2804 it.next()->Print();
2805 }
2806}
2807
2808
2809void LargeObjectSpace::ReportStatistics() {
2810 PrintF(" size: %d\n", size_);
2811 int num_objects = 0;
2812 ClearHistograms();
2813 LargeObjectIterator it(this);
2814 while (it.has_next()) {
2815 num_objects++;
2816 CollectHistogramInfo(it.next());
2817 }
2818
2819 PrintF(" number of objects %d\n", num_objects);
2820 if (num_objects > 0) ReportHistogram(false);
2821}
2822
2823
2824void LargeObjectSpace::CollectCodeStatistics() {
2825 LargeObjectIterator obj_it(this);
2826 while (obj_it.has_next()) {
2827 HeapObject* obj = obj_it.next();
2828 if (obj->IsCode()) {
2829 Code* code = Code::cast(obj);
2830 code_kind_statistics[code->kind()] += code->Size();
2831 }
2832 }
2833}
2834
2835
2836void LargeObjectSpace::PrintRSet() {
2837 LargeObjectIterator it(this);
2838 while (it.has_next()) {
2839 HeapObject* object = it.next();
2840 if (object->IsFixedArray()) {
2841 Page* page = Page::FromAddress(object->address());
2842
2843 Address allocation_top = object->address() + object->Size();
2844 PrintF("large page %p:\n", reinterpret_cast<void*>(page));
2845 PrintRSetRange(page->RSetStart(), page->RSetEnd(),
2846 reinterpret_cast<Object**>(object->address()),
2847 allocation_top);
2848 int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
2849 int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
2850 kBitsPerInt);
2851 PrintF("------------------------------------------------------------"
2852 "-----------\n");
2853 PrintRSetRange(allocation_top,
2854 allocation_top + extra_rset_bits / kBitsPerByte,
2855 reinterpret_cast<Object**>(object->address()
2856 + Page::kObjectAreaSize),
2857 allocation_top);
2858 PrintF("\n");
2859 }
2860 }
2861}
2862#endif // DEBUG
2863
2864} } // namespace v8::internal