// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 { namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())


// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


bool HeapObjectIterator::HasNextInNextPage() {
  if (cur_addr_ == end_addr_) return false;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return true;
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
      stop_page_ = space->last_page_;
      break;
    default:
      UNREACHABLE();
  }
}


// -----------------------------------------------------------------------------
// Page

#ifdef DEBUG
Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif

// -----------------------------------------------------------------------------
// MemoryAllocator
//
int MemoryAllocator::capacity_ = 0;
int MemoryAllocator::size_ = 0;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


void MemoryAllocator::Push(int free_chunk_id) {
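  // Free chunk ids are recycled via a simple stack: free_chunk_ids_ holds the
  // currently unused ids and top_ is the stack height; Pop hands out the most
  // recently pushed id.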
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(int capacity) {
  capacity_ = RoundUp(capacity, Page::kPageSize);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<int>(requested) > capacity_) return NULL;

  void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
  int alloced = *allocated;
  size_ += alloced;
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
  OS::Free(mem, length);
  Counters::memory_allocated.Decrement(length);
  size_ -= length;
  ASSERT(size_ >= 0);
}


void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += requested;
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
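  // For example, if start is 1K past a page boundary and size is five pages
  // plus 1K, the partial pieces at both ends are dropped and the result is
  // four full pages.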
  return (RoundDown(start + size, Page::kPageSize)
          - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
}


Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee the desired number of pages can be
  // allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = chunk_size >> Page::kPageSizeBits;

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size);
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
  Counters::memory_allocated.Increment(size);

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
  Counters::memory_allocated.Increment(size);
  return true;
}


Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
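  // Each page's opaque_header packs the page-aligned address of the next page
  // in its upper bits together with the chunk id in the low bits.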
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->is_normal_page = 1;
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'.
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'p'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(c.size());
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    FreeRawMemory(c.address(), c.size());
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(int max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
                              max_capacity_ / Page::kObjectAreaSize);
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially initialize remembered sets in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->ClearRSet();
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  first_page_ = MemoryAllocator::FreePages(first_page_);
  ASSERT(!first_page_->is_valid());

  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::ClearRSet() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->ClearRSet();
  }
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page in the page, MCSpaceOffsetForAddress considers it is in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to previous page
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->mc_relocation_top = mc_forwarding_info_.top;
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear the remembered set of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->ClearRSet();
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Loop over the pages from the top page to the end of the space to count
  // the number of pages to keep and find the last page to keep.
  int free_pages = 0;
  int pages_to_keep = 0;  // Of the free pages.
  Page* last_page_to_keep = top_page;
  Page* current_page = top_page->next_page();
  // Loop over the pages to the end of the space.
  while (current_page->is_valid()) {
    // Advance last_page_to_keep every other step to end up at the midpoint.
    if ((free_pages & 0x1) == 1) {
      pages_to_keep++;
      last_page_to_keep = last_page_to_keep->next_page();
    }
    free_pages++;
    current_page = current_page->next_page();
  }

  // Free pages after last_page_to_keep, and adjust the next_page link.
  Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
  MemoryAllocator::SetNextPage(last_page_to_keep, p);

  // Since pages are only freed in whole chunks, we may have kept more
  // than pages_to_keep.  Count the extra pages and cache the new last
  // page in the space.
  while (p->is_valid()) {
    pages_to_keep++;
    last_page_ = p;
    p = p->next_page();
  }

  // The difference between free_pages and pages_to_keep is the number of
  // pages actually freed.
  ASSERT(pages_to_keep <= free_pages);
  int bytes_freed = (free_pages - pages_to_keep) * Page::kObjectAreaSize;
  accounting_stats_.ShrinkSpace(bytes_freed);

  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::SemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));
  maximum_capacity_ = maximum_semispace_capacity;
  capacity_ = initial_semispace_capacity;

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * maximum_capacity_);
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start, capacity_, maximum_capacity_)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_capacity_,
                         capacity_,
                         maximum_capacity_)) {
    return false;
  }

  start_ = start;
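  // The chunk is size-aligned and size is a power of two, so an address is in
  // new space exactly when (addr & address_mask_) == start_; object_mask_ and
  // object_expected_ apply the same test to tagged heap object pointers.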
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  capacity_ = 0;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


bool NewSpace::Double() {
  ASSERT(capacity_ <= maximum_capacity_ / 2);
  // TODO(1240712): Failure to double the from space can result in
  // semispaces of different sizes.  In the event of that failure, the
  // to space doubling should be rolled back before returning false.
  if (!to_space_.Double() || !from_space_.Double()) return false;
  capacity_ *= 2;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


// -----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;

  if (!MemoryAllocator::CommitBlock(start, capacity_, executable())) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  age_mark_ = start_;
  return true;
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Double() {
  if (!MemoryAllocator::CommitBlock(high(), capacity_, executable())) {
    return false;
  }
  capacity_ *= 2;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// A static array of histogram info for each type.
static HistogramInfo heap_histograms[LAST_TYPE+1];
static JSObject::SpillInformation js_spill_information;

// heap_histograms is shared, always clear it before using it.
static void ClearHistograms() {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  js_spill_information.Clear();
}


static int code_kind_statistics[Code::NUMBER_OF_KINDS];


static void ClearCodeKindStatistics() {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  const char* table[Code::NUMBER_OF_KINDS];

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
    }
  }

#undef CASE

  PrintF("\n Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(heap_histograms[type].name() != NULL);
  heap_histograms[type].increment_number(1);
  heap_histograms[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
  }

  return obj->Size();
}


static void ReportHistogram(bool print_spill) {
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (heap_histograms[i].number() > 0) {
      PrintF(" %-33s%10d (%10d bytes)\n",
             heap_histograms[i].name(),
             heap_histograms[i].number(),
             heap_histograms[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name)                  \
    string_number += heap_histograms[type].number(); \
    string_bytes += heap_histograms[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    js_spill_information.Print();
  }
}
#endif  // DEBUG


// Support for statistics gathering for --heap-stats and --log-gc.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}

// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
// flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  while (it.has_next()) RecordAllocation(it.next());
}


#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(HistogramInfo* info, const char* description) {
  LOG(HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name)       \
    string_number += info[type].number(); \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(HeapSampleEndEvent("NewSpace", description));
}
#endif  // ENABLE_LOGGING_AND_PROFILING


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF(" capacity: %d, available: %d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-33s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) {
    DoReportStatistics(allocated_histogram_, "allocated");
    DoReportStatistics(promoted_histogram_, "promoted");
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block.  If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > Array::kHeaderSize) {
    set_map(Heap::byte_array_map());
    ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(Heap::one_word_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(Heap::two_word_filler_map());
  } else {
    UNREACHABLE();
  }
  ASSERT(Size() == size_in_bytes);
}


Address FreeListNode::next() {
  ASSERT(map() == Heap::byte_array_map());
  ASSERT(Size() >= kNextOffset + kPointerSize);
  return Memory::Address_at(address() + kNextOffset);
}


void FreeListNode::set_next(Address next) {
  ASSERT(map() == Heap::byte_array_map());
  ASSERT(Size() >= kNextOffset + kPointerSize);
  Memory::Address_at(address() + kNextOffset) = next;
}


OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
  Reset();
}


void OldSpaceFreeList::Reset() {
  available_ = 0;
  for (int i = 0; i < kFreeListsLength; i++) {
    free_[i].head_node_ = NULL;
  }
  needs_rebuild_ = false;
  finger_ = kHead;
  free_[kHead].next_size_ = kEnd;
}


void OldSpaceFreeList::RebuildSizeList() {
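  // The size list threads the non-empty exact-size free lists together via
  // next_size_ in increasing size order, so Allocate can jump to the next
  // larger available block without scanning every list.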
  ASSERT(needs_rebuild_);
  int cur = kHead;
  for (int i = cur + 1; i < kFreeListsLength; i++) {
    if (free_[i].head_node_ != NULL) {
      free_[cur].next_size_ = i;
      cur = i;
    }
  }
  free_[cur].next_size_ = kEnd;
  needs_rebuild_ = false;
}


int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
  for (int i = 0; i < size_in_bytes; i += kPointerSize) {
    Memory::Address_at(start + i) = kZapValue;
  }
#endif
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(size_in_bytes);

  // Early return to drop too-small blocks on the floor (one or two word
  // blocks cannot hold a map pointer, a size field, and a pointer to the
  // next block in the free list).
  if (size_in_bytes < kMinBlockSize) {
    return size_in_bytes;
  }

  // Insert other blocks at the head of an exact free list.
  int index = size_in_bytes >> kPointerSizeLog2;
  node->set_next(free_[index].head_node_);
  free_[index].head_node_ = node->address();
  available_ += size_in_bytes;
  needs_rebuild_ = true;
  return 0;
}


Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  if (needs_rebuild_) RebuildSizeList();
  int index = size_in_bytes >> kPointerSizeLog2;
  // Check for a perfect fit.
  if (free_[index].head_node_ != NULL) {
    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
    // If this was the last block of its size, remove the size.
    if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
    available_ -= size_in_bytes;
    *wasted_bytes = 0;
    return node;
  }
  // Search the size list for the best fit.
  int prev = finger_ < index ? finger_ : kHead;
  int cur = FindSize(index, &prev);
  ASSERT(index < cur);
  if (cur == kEnd) {
    // No large enough size in list.
    *wasted_bytes = 0;
    return Failure::RetryAfterGC(size_in_bytes, owner_);
  }
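  // Split the block of size cur words: the first size_in_bytes go to the
  // caller and the remaining rem words become a new free block (or are
  // counted as waste if smaller than kMinBlockSize).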
1366 int rem = cur - index;
1367 int rem_bytes = rem << kPointerSizeLog2;
1368 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
kasper.lund7276f142008-07-30 08:49:36 +00001369 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001370 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1371 size_in_bytes);
1372 // Distinguish the cases prev < rem < cur and rem <= prev < cur
1373 // to avoid many redundant tests and calls to Insert/RemoveSize.
1374 if (prev < rem) {
1375 // Simple case: insert rem between prev and cur.
1376 finger_ = prev;
1377 free_[prev].next_size_ = rem;
1378 // If this was the last block of size cur, remove the size.
1379 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1380 free_[rem].next_size_ = free_[cur].next_size_;
1381 } else {
1382 free_[rem].next_size_ = cur;
1383 }
1384 // Add the remainder block.
1385 rem_node->set_size(rem_bytes);
1386 rem_node->set_next(free_[rem].head_node_);
1387 free_[rem].head_node_ = rem_node->address();
1388 } else {
1389 // If this was the last block of size cur, remove the size.
1390 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1391 finger_ = prev;
1392 free_[prev].next_size_ = free_[cur].next_size_;
1393 }
1394 if (rem_bytes < kMinBlockSize) {
1395 // Too-small remainder is wasted.
1396 rem_node->set_size(rem_bytes);
1397 available_ -= size_in_bytes + rem_bytes;
1398 *wasted_bytes = rem_bytes;
1399 return cur_node;
1400 }
1401 // Add the remainder block and, if needed, insert its size.
1402 rem_node->set_size(rem_bytes);
1403 rem_node->set_next(free_[rem].head_node_);
1404 free_[rem].head_node_ = rem_node->address();
1405 if (rem_node->next() == NULL) InsertSize(rem);
1406 }
1407 available_ -= size_in_bytes;
1408 *wasted_bytes = 0;
1409 return cur_node;
1410}
1411
1412
kasper.lund7276f142008-07-30 08:49:36 +00001413#ifdef DEBUG
1414bool OldSpaceFreeList::Contains(FreeListNode* node) {
1415 for (int i = 0; i < kFreeListsLength; i++) {
1416 Address cur_addr = free_[i].head_node_;
1417 while (cur_addr != NULL) {
1418 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1419 if (cur_node == node) return true;
1420 cur_addr = cur_node->next();
1421 }
1422 }
1423 return false;
1424}
1425#endif
1426
1427
1428MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
1429 owner_ = owner;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001430 Reset();
1431}
1432
1433
1434void MapSpaceFreeList::Reset() {
1435 available_ = 0;
1436 head_ = NULL;
1437}
1438
1439
1440void MapSpaceFreeList::Free(Address start) {
1441#ifdef DEBUG
1442 for (int i = 0; i < Map::kSize; i += kPointerSize) {
1443 Memory::Address_at(start + i) = kZapValue;
1444 }
1445#endif
1446 FreeListNode* node = FreeListNode::FromAddress(start);
1447 node->set_size(Map::kSize);
1448 node->set_next(head_);
1449 head_ = node->address();
1450 available_ += Map::kSize;
1451}
1452
1453
1454Object* MapSpaceFreeList::Allocate() {
1455 if (head_ == NULL) {
kasper.lund7276f142008-07-30 08:49:36 +00001456 return Failure::RetryAfterGC(Map::kSize, owner_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001457 }
1458
1459 FreeListNode* node = FreeListNode::FromAddress(head_);
1460 head_ = node->next();
1461 available_ -= Map::kSize;
1462 return node;
1463}
1464
1465
1466// -----------------------------------------------------------------------------
1467// OldSpace implementation
1468
1469void OldSpace::PrepareForMarkCompact(bool will_compact) {
1470 if (will_compact) {
1471 // Reset relocation info. During a compacting collection, everything in
1472 // the space is considered 'available' and we will rediscover live data
1473 // and waste during the collection.
1474 MCResetRelocationInfo();
1475 mc_end_of_relocation_ = bottom();
1476 ASSERT(Available() == Capacity());
1477 } else {
1478 // During a non-compacting collection, everything below the linear
1479 // allocation pointer is considered allocated (everything above is
1480 // available) and we will rediscover available and wasted bytes during
1481 // the collection.
1482 accounting_stats_.AllocateBytes(free_list_.available());
1483 accounting_stats_.FillWastedBytes(Waste());
1484 }
1485
kasper.lund7276f142008-07-30 08:49:36 +00001486 // Clear the free list before a full GC---it will be rebuilt afterward.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001487 free_list_.Reset();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001488}
1489
1490
1491void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
1492 ASSERT(Contains(address));
1493 Address current_top = mc_end_of_relocation_;
1494 Page* current_page = Page::FromAllocationTop(current_top);
1495
1496 // No more objects relocated to this page? Move to the next.
1497 ASSERT(current_top <= current_page->mc_relocation_top);
1498 if (current_top == current_page->mc_relocation_top) {
1499 // The space should already be properly expanded.
1500 Page* next_page = current_page->next_page();
1501 CHECK(next_page->is_valid());
1502 mc_end_of_relocation_ = next_page->ObjectAreaStart();
1503 }
1504 ASSERT(mc_end_of_relocation_ == address);
1505 mc_end_of_relocation_ += size_in_bytes;
1506}
1507
1508
1509void OldSpace::MCCommitRelocationInfo() {
1510 // Update fast allocation info.
1511 allocation_info_.top = mc_forwarding_info_.top;
1512 allocation_info_.limit = mc_forwarding_info_.limit;
kasper.lund7276f142008-07-30 08:49:36 +00001513 ASSERT(allocation_info_.VerifyPagedAllocation());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001514
1515 // The space is compacted and we haven't yet built free lists or
1516 // wasted any space.
1517 ASSERT(Waste() == 0);
1518 ASSERT(AvailableFree() == 0);
1519
1520 // Build the free list for the space.
1521 int computed_size = 0;
1522 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
1523 while (it.has_next()) {
1524 Page* p = it.next();
1525 // Space below the relocation pointer is allocated.
1526 computed_size += p->mc_relocation_top - p->ObjectAreaStart();
1527 if (it.has_next()) {
1528 // Free the space at the top of the page. We cannot use
1529 // p->mc_relocation_top after the call to Free (because Free will clear
1530 // remembered set bits).
1531 int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
1532 if (extra_size > 0) {
1533 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
1534 // The bytes we have just "freed" to add to the free list were
1535 // already accounted as available.
1536 accounting_stats_.WasteBytes(wasted_bytes);
1537 }
1538 }
1539 }
1540
1541 // Make sure the computed size - based on the used portion of the pages in
1542 // use - matches the size obtained while computing forwarding addresses.
1543 ASSERT(computed_size == Size());
1544}
1545
1546
kasper.lund7276f142008-07-30 08:49:36 +00001547// Slow case for normal allocation. Try in order: (1) allocate in the next
1548// page in the space, (2) allocate off the space's free list, (3) expand the
1549// space, (4) fail.
1550HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
1551 // Linear allocation in this space has failed. If there is another page
1552 // in the space, move to that page and allocate there. This allocation
1553 // should succeed (size_in_bytes should not be greater than a page's
1554 // object area size).
1555 Page* current_page = TopPageOf(allocation_info_);
1556 if (current_page->next_page()->is_valid()) {
1557 return AllocateInNextPage(current_page, size_in_bytes);
1558 }
1559
1560 // There is no next page in this space. Try free list allocation.
1561 int wasted_bytes;
1562 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
1563 accounting_stats_.WasteBytes(wasted_bytes);
1564 if (!result->IsFailure()) {
1565 accounting_stats_.AllocateBytes(size_in_bytes);
1566 return HeapObject::cast(result);
1567 }
1568
1569 // Free list allocation failed and there is no next page. Fail if we have
1570 // hit the old generation size limit that should cause a garbage
1571 // collection.
1572 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
1573 return NULL;
1574 }
1575
1576 // Try to expand the space and allocate in the new next page.
1577 ASSERT(!current_page->next_page()->is_valid());
1578 if (Expand(current_page)) {
1579 return AllocateInNextPage(current_page, size_in_bytes);
1580 }
1581
1582 // Finally, fail.
1583 return NULL;
1584}
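// Sketch of how this slow path is reached (assuming the usual inlined fast
// path for paged spaces, defined elsewhere): linear allocation simply bumps
// allocation_info_.top, and only when top + size_in_bytes would pass
// allocation_info_.limit does the caller fall back to SlowAllocateRaw().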
1585
1586
1587// Add the block at the top of the page to the space's free list, set the
1588// allocation info to the next page (which is assumed to exist), and allocate
1589// linearly there.
1590HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
1591 int size_in_bytes) {
1592 ASSERT(current_page->next_page()->is_valid());
1593 // Add the block at the top of this page to the free list.
1594 int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
1595 if (free_size > 0) {
1596 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
1597 accounting_stats_.WasteBytes(wasted_bytes);
1598 }
1599 SetAllocationInfo(&allocation_info_, current_page->next_page());
1600 return AllocateLinearly(&allocation_info_, size_in_bytes);
1601}
1602
1603
1604#ifdef DEBUG
1605// We do not assume that the PageIterator works, because it depends on the
1606// invariants we are checking during verification.
1607void OldSpace::Verify() {
1608 // The allocation pointer should be valid, and it should be in a page in the
1609 // space.
1610 ASSERT(allocation_info_.VerifyPagedAllocation());
1611 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
1612 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
1613
1614 // Loop over all the pages.
1615 bool above_allocation_top = false;
1616 Page* current_page = first_page_;
1617 while (current_page->is_valid()) {
1618 if (above_allocation_top) {
1619 // We don't care what's above the allocation top.
1620 } else {
1621 // Unless this is the last page in the space containing allocated
1622 // objects, the allocation top should be at the object area end.
1623 Address top = current_page->AllocationTop();
1624 if (current_page == top_page) {
1625 ASSERT(top == allocation_info_.top);
1626 // The next page will be above the allocation top.
1627 above_allocation_top = true;
1628 } else {
1629 ASSERT(top == current_page->ObjectAreaEnd());
1630 }
1631
1632 // It should be packed with objects from the bottom to the top.
1633 Address current = current_page->ObjectAreaStart();
1634 while (current < top) {
1635 HeapObject* object = HeapObject::FromAddress(current);
1636
1637 // The first word should be a map, and we expect all map pointers to
1638 // be in map space.
1639 Map* map = object->map();
1640 ASSERT(map->IsMap());
1641 ASSERT(Heap::map_space()->Contains(map));
1642
1643 // The object should not be a map.
1644 ASSERT(!object->IsMap());
1645
1646 // The object itself should look OK.
1647 object->Verify();
1648
1649 // All the interior pointers should be contained in the heap and have
1650 // their remembered set bits set if they point to new space. Code
1651 // objects do not have remembered set bits that we care about.
1652 VerifyPointersAndRSetVisitor rset_visitor;
1653 VerifyPointersVisitor no_rset_visitor;
1654 int size = object->Size();
1655 if (object->IsCode()) {
1656 Code::cast(object)->ConvertICTargetsFromAddressToObject();
1657 object->IterateBody(map->instance_type(), size, &no_rset_visitor);
1658 Code::cast(object)->ConvertICTargetsFromObjectToAddress();
1659 } else {
1660 object->IterateBody(map->instance_type(), size, &rset_visitor);
1661 }
1662
1663 current += size;
1664 }
1665
1666 // The allocation pointer should not be in the middle of an object.
1667 ASSERT(current == top);
1668 }
1669
1670 current_page = current_page->next_page();
1671 }
1672}
1673
1674
1675struct CommentStatistic {
1676 const char* comment;
1677 int size;
1678 int count;
1679 void Clear() {
1680 comment = NULL;
1681 size = 0;
1682 count = 0;
1683 }
1684};
1685
1686
1687// Must be small, since a linear iteration is used for lookup.
1688const int kMaxComments = 64;
1689static CommentStatistic comments_statistics[kMaxComments+1];
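// The extra slot at index kMaxComments is an overflow bucket: once
// kMaxComments distinct comment strings have claimed entries, everything else
// is aggregated under the "Unknown" entry set up in ResetCodeStatistics().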
1690
1691
1692void PagedSpace::ReportCodeStatistics() {
1693 ReportCodeKindStatistics();
1694 PrintF("Code comment statistics (\" [ comment-txt : size/ "
1695 "count (average)\"):\n");
1696 for (int i = 0; i <= kMaxComments; i++) {
1697 const CommentStatistic& cs = comments_statistics[i];
1698 if (cs.size > 0) {
1699 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
1700 cs.size/cs.count);
1701 }
1702 }
1703 PrintF("\n");
1704}
1705
1706
1707void PagedSpace::ResetCodeStatistics() {
1708 ClearCodeKindStatistics();
1709 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
1710 comments_statistics[kMaxComments].comment = "Unknown";
1711 comments_statistics[kMaxComments].size = 0;
1712 comments_statistics[kMaxComments].count = 0;
1713}
1714
1715
1716// Adds a comment to the 'comments_statistics' table. Performance is OK as
1717// long as 'kMaxComments' is small.
1718static void EnterComment(const char* comment, int delta) {
1719 // Do not count empty comments
1720 if (delta <= 0) return;
1721 CommentStatistic* cs = &comments_statistics[kMaxComments];
1722 // Search for a free or matching entry in 'comments_statistics': 'cs'
1723 // points to result.
1724 for (int i = 0; i < kMaxComments; i++) {
1725 if (comments_statistics[i].comment == NULL) {
1726 cs = &comments_statistics[i];
1727 cs->comment = comment;
1728 break;
1729 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
1730 cs = &comments_statistics[i];
1731 break;
1732 }
1733 }
1734 // Update entry for 'comment'
1735 cs->size += delta;
1736 cs->count += 1;
1737}
1738
1739
1740// Called for each nested comment start (start marked with '[ xxx', end
1741// marked with ']'). RelocIterator 'it' must point to a comment reloc info.
1742static void CollectCommentStatistics(RelocIterator* it) {
1743 ASSERT(!it->done());
1744 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
1745 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
1746 if (tmp[0] != '[') {
1747 // Not a nested comment; skip
1748 return;
1749 }
1750
1751 // Search for end of nested comment or a new nested comment
1752 const char* const comment_txt =
1753 reinterpret_cast<const char*>(it->rinfo()->data());
1754 const byte* prev_pc = it->rinfo()->pc();
1755 int flat_delta = 0;
1756 it->next();
1757 while (true) {
1758 // All nested comments must be terminated properly, so we will always
1759 // exit this loop.
1760 ASSERT(!it->done());
1761 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
1762 const char* const txt =
1763 reinterpret_cast<const char*>(it->rinfo()->data());
1764 flat_delta += it->rinfo()->pc() - prev_pc;
1765 if (txt[0] == ']') break; // End of nested comment
1766 // A new comment
1767 CollectCommentStatistics(it);
1768 // Skip code that was covered with previous comment
1769 prev_pc = it->rinfo()->pc();
1770 }
1771 it->next();
1772 }
1773 EnterComment(comment_txt, flat_delta);
1774}
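// Illustrative example of the nesting handled above (the comment texts are
// made up): a reloc stream such as
//   "[ LoadIC"  ...  "[ inlined smi check"  ...  "]"  ...  "]"
// charges the inner bytes to "[ inlined smi check" via the recursive call and
// only the remaining flat bytes to "[ LoadIC", so nested regions are not
// counted twice.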
1775
1776
1777// Collects code size statistics:
1778// - by code kind
1779// - by code comment
1780void PagedSpace::CollectCodeStatistics() {
1781 HeapObjectIterator obj_it(this);
1782 while (obj_it.has_next()) {
1783 HeapObject* obj = obj_it.next();
1784 if (obj->IsCode()) {
1785 Code* code = Code::cast(obj);
1786 code_kind_statistics[code->kind()] += code->Size();
1787 RelocIterator it(code);
1788 int delta = 0;
1789 const byte* prev_pc = code->instruction_start();
1790 while (!it.done()) {
1791 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
1792 delta += it.rinfo()->pc() - prev_pc;
1793 CollectCommentStatistics(&it);
1794 prev_pc = it.rinfo()->pc();
1795 }
1796 it.next();
1797 }
1798
1799 ASSERT(code->instruction_start() <= prev_pc &&
1800 prev_pc <= code->relocation_start());
1801 delta += code->relocation_start() - prev_pc;
1802 EnterComment("NoComment", delta);
1803 }
1804 }
1805}
1806
1807
1808void OldSpace::ReportStatistics() {
1809 int pct = Available() * 100 / Capacity();
1810 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
1811 Capacity(), Waste(), Available(), pct);
1812
1813 // Report remembered set statistics.
1814 int rset_marked_pointers = 0;
1815 int rset_marked_arrays = 0;
1816 int rset_marked_array_elements = 0;
1817 int cross_gen_pointers = 0;
1818 int cross_gen_array_elements = 0;
1819
1820 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
1821 while (page_it.has_next()) {
1822 Page* p = page_it.next();
1823
1824 for (Address rset_addr = p->RSetStart();
1825 rset_addr < p->RSetEnd();
1826 rset_addr += kIntSize) {
1827 int rset = Memory::int_at(rset_addr);
1828 if (rset != 0) {
1829 // Bits were set
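        // Decoding sketch (assuming a 32-bit build where kObjectAlignmentBits
        // is 2, i.e. one rset bit per 4-byte word): bit 'bitoff' of the rset
        // word at byte offset 'intoff' from the page start covers the slot at
        // page offset (intoff * kBitsPerByte + bitoff) << kObjectAlignmentBits.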
1830 int intoff = rset_addr - p->address();
1831 int bitoff = 0;
1832 for (; bitoff < kBitsPerInt; ++bitoff) {
1833 if ((rset & (1 << bitoff)) != 0) {
1834 int bitpos = intoff*kBitsPerByte + bitoff;
1835 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
1836 Object** obj = reinterpret_cast<Object**>(slot);
1837 if (*obj == Heap::fixed_array_map()) {
1838 rset_marked_arrays++;
1839 FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
1840
1841 rset_marked_array_elements += fa->length();
1842 // Manually inline FixedArray::IterateBody
1843 Address elm_start = slot + FixedArray::kHeaderSize;
1844 Address elm_stop = elm_start + fa->length() * kPointerSize;
1845 for (Address elm_addr = elm_start;
1846 elm_addr < elm_stop; elm_addr += kPointerSize) {
1847 // Filter non-heap-object pointers
1848 Object** elm_p = reinterpret_cast<Object**>(elm_addr);
1849 if (Heap::InNewSpace(*elm_p))
1850 cross_gen_array_elements++;
1851 }
1852 } else {
1853 rset_marked_pointers++;
1854 if (Heap::InNewSpace(*obj))
1855 cross_gen_pointers++;
1856 }
1857 }
1858 }
1859 }
1860 }
1861 }
1862
1863 pct = rset_marked_pointers == 0 ?
1864 0 : cross_gen_pointers * 100 / rset_marked_pointers;
1865 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
1866 rset_marked_pointers, cross_gen_pointers, pct);
1867 PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
1868 PrintF(" elements %d, ", rset_marked_array_elements);
1869 pct = rset_marked_array_elements == 0 ? 0
1870 : cross_gen_array_elements * 100 / rset_marked_array_elements;
1871 PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
1872 PrintF(" total rset-marked bits %d\n",
1873 (rset_marked_pointers + rset_marked_arrays));
1874 pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
1875 : (cross_gen_pointers + cross_gen_array_elements) * 100 /
1876 (rset_marked_pointers + rset_marked_array_elements);
1877 PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
1878 (rset_marked_pointers + rset_marked_array_elements),
1879 (cross_gen_pointers + cross_gen_array_elements),
1880 pct);
1881
1882 ClearHistograms();
1883 HeapObjectIterator obj_it(this);
1884 while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
1885 ReportHistogram(true);
1886}
1887
1888
1889// Dump the range of remembered set words between [start, end) corresponding
1890// to the pointers starting at object_p. The allocation_top is an object
1891// pointer which should not be read past. This is important for large object
1892// pages, where some bits in the remembered set range do not correspond to
1893// allocated addresses.
1894static void PrintRSetRange(Address start, Address end, Object** object_p,
1895 Address allocation_top) {
1896 Address rset_address = start;
1897
1898 // If the range starts on an odd-numbered word (e.g., for large object extra
1899 // remembered set ranges), print some spaces.
1900 if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
1901 PrintF(" ");
1902 }
1903
1904 // Loop over all the words in the range.
1905 while (rset_address < end) {
1906 uint32_t rset_word = Memory::uint32_at(rset_address);
1907 int bit_position = 0;
1908
1909 // Loop over all the bits in the word.
1910 while (bit_position < kBitsPerInt) {
1911 if (object_p == reinterpret_cast<Object**>(allocation_top)) {
1912 // Print a bar at the allocation pointer.
1913 PrintF("|");
1914 } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
1915 // Do not dereference object_p past the allocation pointer.
1916 PrintF("#");
1917 } else if ((rset_word & (1 << bit_position)) == 0) {
1918 // Print a dot for zero bits.
1919 PrintF(".");
1920 } else if (Heap::InNewSpace(*object_p)) {
1921 // Print an X for one bits for pointers to new space.
1922 PrintF("X");
1923 } else {
1924 // Print a circle for one bits for pointers to old space.
1925 PrintF("o");
1926 }
1927
1928 // Print a space after every 8th bit except the last.
1929 if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
1930 PrintF(" ");
1931 }
1932
1933 // Advance to next bit.
1934 bit_position++;
1935 object_p++;
1936 }
1937
1938 // Print a newline after every odd numbered word, otherwise a space.
1939 if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
1940 PrintF("\n");
1941 } else {
1942 PrintF(" ");
1943 }
1944
1945 // Advance to next remembered set word.
1946 rset_address += kIntSize;
1947 }
1948}
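// Legend for the dump produced above: '.' is a clear bit, 'X' a set bit whose
// slot points to new space, 'o' a set bit pointing elsewhere, '|' marks the
// allocation top, and '#' marks slots past the top that are never
// dereferenced.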
1949
1950
1951void PagedSpace::DoPrintRSet(const char* space_name) {
1952 PageIterator it(this, PageIterator::PAGES_IN_USE);
1953 while (it.has_next()) {
1954 Page* p = it.next();
1955 PrintF("%s page 0x%x:\n", space_name, p);
1956 PrintRSetRange(p->RSetStart(), p->RSetEnd(),
1957 reinterpret_cast<Object**>(p->ObjectAreaStart()),
1958 p->AllocationTop());
1959 PrintF("\n");
1960 }
1961}
1962
1963
1964void OldSpace::PrintRSet() { DoPrintRSet("old"); }
1965#endif
1966
1967// -----------------------------------------------------------------------------
1968// MapSpace implementation
1969
1970void MapSpace::PrepareForMarkCompact(bool will_compact) {
1971 if (will_compact) {
1972 // Reset relocation info.
1973 MCResetRelocationInfo();
1974
1975 // Initialize map index entry.
1976 int page_count = 0;
1977 PageIterator it(this, PageIterator::ALL_PAGES);
1978 while (it.has_next()) {
1979 ASSERT_MAP_PAGE_INDEX(page_count);
1980
1981 Page* p = it.next();
1982 ASSERT(p->mc_page_index == page_count);
1983
1984 page_addresses_[page_count++] = p->address();
1985 }
1986
1987 // During a compacting collection, everything in the space is considered
1988 // 'available' (set by the call to MCResetRelocationInfo) and we will
1989 // rediscover live and wasted bytes during the collection.
1990 ASSERT(Available() == Capacity());
1991 } else {
1992 // During a non-compacting collection, everything below the linear
1993 // allocation pointer except wasted top-of-page blocks is considered
1994 // allocated and we will rediscover available bytes during the
1995 // collection.
1996 accounting_stats_.AllocateBytes(free_list_.available());
1997 }
1998
1999 // Clear the free list before a full GC---it will be rebuilt afterward.
2000 free_list_.Reset();
2001}
2002
2003
2004void MapSpace::MCCommitRelocationInfo() {
2005 // Update fast allocation info.
2006 allocation_info_.top = mc_forwarding_info_.top;
2007 allocation_info_.limit = mc_forwarding_info_.limit;
2008 ASSERT(allocation_info_.VerifyPagedAllocation());
2009
2010 // The space is compacted and we haven't yet wasted any space.
2011 ASSERT(Waste() == 0);
2012
2013 // Update allocation_top of each page in use and compute waste.
2014 int computed_size = 0;
2015 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2016 while (it.has_next()) {
2017 Page* page = it.next();
2018 Address page_top = page->AllocationTop();
2019 computed_size += page_top - page->ObjectAreaStart();
2020 if (it.has_next()) {
2021 accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
2022 }
2023 }
2024
2025 // Make sure the computed size - based on the used portion of the
2026 // pages in use - matches the size we adjust during allocation.
2027 ASSERT(computed_size == Size());
2028}
2029
2030
2031// Slow case for normal allocation. Try in order: (1) allocate in the next
2032// page in the space, (2) allocate off the space's free list, (3) expand the
2033// space, (4) fail.
2034HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
2035 // Linear allocation in this space has failed. If there is another page
2036 // in the space, move to that page and allocate there. This allocation
2037 // should succeed.
2038 Page* current_page = TopPageOf(allocation_info_);
2039 if (current_page->next_page()->is_valid()) {
2040 return AllocateInNextPage(current_page, size_in_bytes);
2041 }
2042
2043 // There is no next page in this space. Try free list allocation. The
2044 // map space free list implicitly assumes that all free blocks are map
2045 // sized.
2046 if (size_in_bytes == Map::kSize) {
2047 Object* result = free_list_.Allocate();
2048 if (!result->IsFailure()) {
2049 accounting_stats_.AllocateBytes(size_in_bytes);
2050 return HeapObject::cast(result);
2051 }
2052 }
2053
2054 // Free list allocation failed and there is no next page. Fail if we have
2055 // hit the old generation size limit that should cause a garbage
2056 // collection.
2057 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2058 return NULL;
2059 }
2060
2061 // Try to expand the space and allocate in the new next page.
2062 ASSERT(!current_page->next_page()->is_valid());
2063 if (Expand(current_page)) {
2064 return AllocateInNextPage(current_page, size_in_bytes);
2065 }
2066
2067 // Finally, fail.
2068 return NULL;
2069}
2070
2071
2072// Move to the next page (there is assumed to be one) and allocate there.
2073// The top of page block is always wasted, because it is too small to hold a
2074// map.
2075HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
2076 int size_in_bytes) {
2077 ASSERT(current_page->next_page()->is_valid());
2078 ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
2079 accounting_stats_.WasteBytes(kPageExtra);
2080 SetAllocationInfo(&allocation_info_, current_page->next_page());
2081 return AllocateLinearly(&allocation_info_, size_in_bytes);
2082}
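// Note on the waste charged above: the ASSERT documents that exactly
// kPageExtra bytes remain at the top of a fully used map-space page, and since
// that block is too small to hold a map (see the function comment) it is
// written off as waste rather than added to the free list.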
2083
2084
2085#ifdef DEBUG
2086// We do not assume that the PageIterator works, because it depends on the
2087// invariants we are checking during verification.
2088void MapSpace::Verify() {
2089 // The allocation pointer should be valid, and it should be in a page in the
2090 // space.
2091 ASSERT(allocation_info_.VerifyPagedAllocation());
2092 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
2093 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
2094
2095 // Loop over all the pages.
2096 bool above_allocation_top = false;
2097 Page* current_page = first_page_;
2098 while (current_page->is_valid()) {
2099 if (above_allocation_top) {
2100 // We don't care what's above the allocation top.
2101 } else {
2102 // Unless this is the last page in the space containing allocated
2103 // objects, the allocation top should be at a constant offset from the
2104 // object area end.
2105 Address top = current_page->AllocationTop();
2106 if (current_page == top_page) {
2107 ASSERT(top == allocation_info_.top);
2108 // The next page will be above the allocation top.
2109 above_allocation_top = true;
2110 } else {
2111 ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
2112 }
2113
2114 // It should be packed with objects from the bottom to the top.
2115 Address current = current_page->ObjectAreaStart();
2116 while (current < top) {
2117 HeapObject* object = HeapObject::FromAddress(current);
2118
2119 // The first word should be a map, and we expect all map pointers to
2120 // be in map space.
2121 Map* map = object->map();
2122 ASSERT(map->IsMap());
2123 ASSERT(Heap::map_space()->Contains(map));
2124
2125 // The object should be a map or a byte array.
2126 ASSERT(object->IsMap() || object->IsByteArray());
2127
2128 // The object itself should look OK.
2129 object->Verify();
2130
2131 // All the interior pointers should be contained in the heap and
2132 // have their remembered set bits set if they point to new space.
2133 VerifyPointersAndRSetVisitor visitor;
2134 int size = object->Size();
2135 object->IterateBody(map->instance_type(), size, &visitor);
2136
2137 current += size;
2138 }
2139
2140 // The allocation pointer should not be in the middle of an object.
2141 ASSERT(current == top);
2142 }
2143
2144 current_page = current_page->next_page();
2145 }
2146}
2147
2148
2149void MapSpace::ReportStatistics() {
2150 int pct = Available() * 100 / Capacity();
2151 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2152 Capacity(), Waste(), Available(), pct);
2153
2154 // Report remembered set statistics.
2155 int rset_marked_pointers = 0;
2156 int cross_gen_pointers = 0;
2157
2158 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2159 while (page_it.has_next()) {
2160 Page* p = page_it.next();
2161
2162 for (Address rset_addr = p->RSetStart();
2163 rset_addr < p->RSetEnd();
2164 rset_addr += kIntSize) {
2165 int rset = Memory::int_at(rset_addr);
2166 if (rset != 0) {
2167 // Bits were set
2168 int intoff = rset_addr - p->address();
2169 int bitoff = 0;
2170 for (; bitoff < kBitsPerInt; ++bitoff) {
2171 if ((rset & (1 << bitoff)) != 0) {
2172 int bitpos = intoff*kBitsPerByte + bitoff;
2173 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2174 Object** obj = reinterpret_cast<Object**>(slot);
2175 rset_marked_pointers++;
2176 if (Heap::InNewSpace(*obj))
2177 cross_gen_pointers++;
2178 }
2179 }
2180 }
2181 }
2182 }
2183
2184 pct = rset_marked_pointers == 0 ?
2185 0 : cross_gen_pointers * 100 / rset_marked_pointers;
2186 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
2187 rset_marked_pointers, cross_gen_pointers, pct);
2188
2189 ClearHistograms();
2190 HeapObjectIterator obj_it(this);
2191 while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
2192 ReportHistogram(false);
2193}
2194
2195
2196void MapSpace::PrintRSet() { DoPrintRSet("map"); }
2197#endif
2198
2199
2200// -----------------------------------------------------------------------------
2201// LargeObjectIterator
2202
2203LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2204 current_ = space->first_chunk_;
2205 size_func_ = NULL;
2206}
2207
2208
2209LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2210 HeapObjectCallback size_func) {
2211 current_ = space->first_chunk_;
2212 size_func_ = size_func;
2213}
2214
2215
2216HeapObject* LargeObjectIterator::next() {
2217 ASSERT(has_next());
2218 HeapObject* object = current_->GetObject();
2219 current_ = current_->next();
2220 return object;
2221}
2222
2223
2224// -----------------------------------------------------------------------------
2225// LargeObjectChunk
2226
2227LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2228 size_t* chunk_size,
2229 Executability executable) {
2230 size_t requested = ChunkSizeFor(size_in_bytes);
2231 void* mem = MemoryAllocator::AllocateRawMemory(requested,
2232 chunk_size,
2233 executable);
2234 if (mem == NULL) return NULL;
2235 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
2236 if (*chunk_size < requested) {
2237 MemoryAllocator::FreeRawMemory(mem, *chunk_size);
2238 LOG(DeleteEvent("LargeObjectChunk", mem));
2239 return NULL;
2240 }
2241 return reinterpret_cast<LargeObjectChunk*>(mem);
2242}
2243
2244
2245int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
2246 int os_alignment = OS::AllocateAlignment();
2247 if (os_alignment < Page::kPageSize)
2248 size_in_bytes += (Page::kPageSize - os_alignment);
2249 return size_in_bytes + Page::kObjectStartOffset;
2250}
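// Worked example of the computation above (hypothetical sizes): if the OS
// allocation alignment is smaller than Page::kPageSize, the extra
// Page::kPageSize - os_alignment bytes of slack guarantee that a page-aligned
// start can be found inside the raw chunk, and Page::kObjectStartOffset then
// covers the page header and remembered set in front of the object area.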
2251
2252// -----------------------------------------------------------------------------
2253// LargeObjectSpace
2254
2255LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
2256 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2257 first_chunk_(NULL),
2258 size_(0),
2259 page_count_(0) {}
2260
2261
2262bool LargeObjectSpace::Setup() {
2263 first_chunk_ = NULL;
2264 size_ = 0;
2265 page_count_ = 0;
2266 return true;
2267}
2268
2269
2270void LargeObjectSpace::TearDown() {
2271 while (first_chunk_ != NULL) {
2272 LargeObjectChunk* chunk = first_chunk_;
2273 first_chunk_ = first_chunk_->next();
2274 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
2275 MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
2276 }
2277
2278 size_ = 0;
2279 page_count_ = 0;
2280}
2281
2282
2283#ifdef ENABLE_HEAP_PROTECTION
2284
2285void LargeObjectSpace::Protect() {
2286 LargeObjectChunk* chunk = first_chunk_;
2287 while (chunk != NULL) {
2288 MemoryAllocator::Protect(chunk->address(), chunk->size());
2289 chunk = chunk->next();
2290 }
2291}
2292
2293
2294void LargeObjectSpace::Unprotect() {
2295 LargeObjectChunk* chunk = first_chunk_;
2296 while (chunk != NULL) {
2297 bool is_code = chunk->GetObject()->IsCode();
2298 MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
2299 is_code ? EXECUTABLE : NOT_EXECUTABLE);
2300 chunk = chunk->next();
2301 }
2302}
2303
2304#endif
2305
2306
2307Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
2308 int object_size,
2309 Executability executable) {
2310 ASSERT(0 < object_size && object_size <= requested_size);
2311
2312 // Check if we want to force a GC before growing the old space further.
2313 // If so, fail the allocation.
2314 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2315 return Failure::RetryAfterGC(requested_size, identity());
2316 }
2317
2318 size_t chunk_size;
2319 LargeObjectChunk* chunk =
2320 LargeObjectChunk::New(requested_size, &chunk_size, executable);
2321 if (chunk == NULL) {
2322 return Failure::RetryAfterGC(requested_size, identity());
2323 }
2324
2325 size_ += chunk_size;
2326 page_count_++;
2327 chunk->set_next(first_chunk_);
2328 chunk->set_size(chunk_size);
2329 first_chunk_ = chunk;
2330
2331 // Set the object address and size in the page header and clear its
2332 // remembered set.
2333 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2334 Address object_address = page->ObjectAreaStart();
2335 // Clear the low order bit of the second word in the page to flag it as a
2336 // large object page. If the chunk_size happened to be written there, its
2337 // low order bit should already be clear.
2338 ASSERT((chunk_size & 0x1) == 0);
2339 page->is_normal_page &= ~0x1;
2340 page->ClearRSet();
2341 int extra_bytes = requested_size - object_size;
2342 if (extra_bytes > 0) {
2343 // The extra memory for the remembered set should be cleared.
2344 memset(object_address + object_size, 0, extra_bytes);
2345 }
2346
2347 return HeapObject::FromAddress(object_address);
2348}
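// Layout sketch of the chunk initialized above: the LargeObjectChunk header
// sits at the start of the raw allocation, the first page-aligned address
// inside it carries a page header whose is_normal_page bit has been cleared,
// and the single large object begins at that page's ObjectAreaStart().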
2349
2350
2351Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
2352 ASSERT(0 < size_in_bytes);
2353 return AllocateRawInternal(size_in_bytes,
2354 size_in_bytes,
2355 EXECUTABLE);
2356}
2357
2358
2359Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
2360 ASSERT(0 < size_in_bytes);
2361 int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
2362 return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
2363 size_in_bytes,
2364 NOT_EXECUTABLE);
2365}
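// Note on the extra space requested above: elements that spill past the first
// page of the chunk have no normal remembered-set words covering them, so
// ExtraRSetBytesFor() reserves roughly one extra rset bit per pointer-sized
// element (compare the extra_rset_bits computation in PrintRSet below), and
// AllocateRawInternal() zeroes those trailing bytes.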
2366
2367
2368Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2369 ASSERT(0 < size_in_bytes);
2370 return AllocateRawInternal(size_in_bytes,
2371 size_in_bytes,
2372 NOT_EXECUTABLE);
2373}
2374
2375
2376// GC support
2377Object* LargeObjectSpace::FindObject(Address a) {
2378 for (LargeObjectChunk* chunk = first_chunk_;
2379 chunk != NULL;
2380 chunk = chunk->next()) {
2381 Address chunk_address = chunk->address();
2382 if (chunk_address <= a && a < chunk_address + chunk->size()) {
2383 return chunk->GetObject();
2384 }
2385 }
2386 return Failure::Exception();
2387}
2388
2389
2390void LargeObjectSpace::ClearRSet() {
2391 ASSERT(Page::is_rset_in_use());
2392
2393 LargeObjectIterator it(this);
2394 while (it.has_next()) {
2395 HeapObject* object = it.next();
2396 // We only have code, sequential strings, or fixed arrays in large
2397 // object space, and only fixed arrays need remembered set support.
2398 if (object->IsFixedArray()) {
2399 // Clear the normal remembered set region of the page.
2400 Page* page = Page::FromAddress(object->address());
2401 page->ClearRSet();
2402
2403 // Clear the extra remembered set.
2404 int size = object->Size();
2405 int extra_rset_bytes = ExtraRSetBytesFor(size);
2406 memset(object->address() + size, 0, extra_rset_bytes);
2407 }
2408 }
2409}
2410
2411
2412void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
2413 ASSERT(Page::is_rset_in_use());
2414
2415 LargeObjectIterator it(this);
2416 while (it.has_next()) {
2417 // We only have code, sequential strings, or fixed arrays in large
2418 // object space, and only fixed arrays can possibly contain pointers to
2419 // the young generation.
2420 HeapObject* object = it.next();
2421 if (object->IsFixedArray()) {
2422 // Iterate the normal page remembered set range.
2423 Page* page = Page::FromAddress(object->address());
2424 Address object_end = object->address() + object->Size();
2425 Heap::IterateRSetRange(page->ObjectAreaStart(),
2426 Min(page->ObjectAreaEnd(), object_end),
2427 page->RSetStart(),
2428 copy_object_func);
2429
2430 // Iterate the extra array elements.
2431 if (object_end > page->ObjectAreaEnd()) {
2432 Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
2433 object_end, copy_object_func);
2434 }
2435 }
2436 }
2437}
2438
2439
2440void LargeObjectSpace::FreeUnmarkedObjects() {
2441 LargeObjectChunk* previous = NULL;
2442 LargeObjectChunk* current = first_chunk_;
2443 while (current != NULL) {
2444 HeapObject* object = current->GetObject();
2445 if (object->IsMarked()) {
2446 object->ClearMark();
2447 MarkCompactCollector::tracer()->decrement_marked_count();
2448 previous = current;
2449 current = current->next();
2450 } else {
2451 Address chunk_address = current->address();
2452 size_t chunk_size = current->size();
2453
2454 // Cut the chunk out from the chunk list.
2455 current = current->next();
2456 if (previous == NULL) {
2457 first_chunk_ = current;
2458 } else {
2459 previous->set_next(current);
2460 }
2461
2462 // Free the chunk.
2463 if (object->IsCode()) {
2464 LOG(CodeDeleteEvent(object->address()));
2465 }
2466 size_ -= chunk_size;
2467 page_count_--;
2468 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
2469 LOG(DeleteEvent("LargeObjectChunk", chunk_address));
2470 }
2471 }
2472}
2473
2474
2475bool LargeObjectSpace::Contains(HeapObject* object) {
2476 Address address = object->address();
2477 Page* page = Page::FromAddress(address);
2478
2479 SLOW_ASSERT(!page->IsLargeObjectPage()
2480 || !FindObject(address)->IsFailure());
2481
2482 return page->IsLargeObjectPage();
2483}
2484
2485
2486#ifdef DEBUG
2487// We do not assume that the large object iterator works, because it depends
2488// on the invariants we are checking during verification.
2489void LargeObjectSpace::Verify() {
2490 for (LargeObjectChunk* chunk = first_chunk_;
2491 chunk != NULL;
2492 chunk = chunk->next()) {
2493 // Each chunk contains an object that starts at the large object page's
2494 // object area start.
2495 HeapObject* object = chunk->GetObject();
2496 Page* page = Page::FromAddress(object->address());
2497 ASSERT(object->address() == page->ObjectAreaStart());
2498
2499 // The first word should be a map, and we expect all map pointers to be
2500 // in map space.
2501 Map* map = object->map();
2502 ASSERT(map->IsMap());
2503 ASSERT(Heap::map_space()->Contains(map));
2504
2505 // We have only code, sequential strings, fixed arrays, and byte arrays
2506 // in large object space.
2507 ASSERT(object->IsCode() || object->IsSeqString()
2508 || object->IsFixedArray() || object->IsByteArray());
2509
2510 // The object itself should look OK.
2511 object->Verify();
2512
2513 // Byte arrays and strings don't have interior pointers.
2514 if (object->IsCode()) {
2515 VerifyPointersVisitor code_visitor;
2516 Code::cast(object)->ConvertICTargetsFromAddressToObject();
2517 object->IterateBody(map->instance_type(),
2518 object->Size(),
2519 &code_visitor);
2520 Code::cast(object)->ConvertICTargetsFromObjectToAddress();
2521 } else if (object->IsFixedArray()) {
2522 // We loop over fixed arrays ourselves, rather than using the visitor,
2523 // because the visitor doesn't support the start/offset iteration
2524 // needed for IsRSetSet.
2525 FixedArray* array = FixedArray::cast(object);
2526 for (int j = 0; j < array->length(); j++) {
2527 Object* element = array->get(j);
2528 if (element->IsHeapObject()) {
2529 HeapObject* element_object = HeapObject::cast(element);
2530 ASSERT(Heap::Contains(element_object));
2531 ASSERT(element_object->map()->IsMap());
2532 if (Heap::InNewSpace(element_object)) {
2533 ASSERT(Page::IsRSetSet(object->address(),
2534 FixedArray::kHeaderSize + j * kPointerSize));
2535 }
2536 }
2537 }
2538 }
2539 }
2540}
2541
2542
2543void LargeObjectSpace::Print() {
2544 LargeObjectIterator it(this);
2545 while (it.has_next()) {
2546 it.next()->Print();
2547 }
2548}
2549
2550
2551void LargeObjectSpace::ReportStatistics() {
2552 PrintF(" size: %d\n", size_);
2553 int num_objects = 0;
2554 ClearHistograms();
2555 LargeObjectIterator it(this);
2556 while (it.has_next()) {
2557 num_objects++;
2558 CollectHistogramInfo(it.next());
2559 }
2560
2561 PrintF(" number of objects %d\n", num_objects);
2562 if (num_objects > 0) ReportHistogram(false);
2563}
2564
2565
2566void LargeObjectSpace::CollectCodeStatistics() {
2567 LargeObjectIterator obj_it(this);
2568 while (obj_it.has_next()) {
2569 HeapObject* obj = obj_it.next();
2570 if (obj->IsCode()) {
2571 Code* code = Code::cast(obj);
2572 code_kind_statistics[code->kind()] += code->Size();
2573 }
2574 }
2575}
2576
2577
2578void LargeObjectSpace::PrintRSet() {
2579 LargeObjectIterator it(this);
2580 while (it.has_next()) {
2581 HeapObject* object = it.next();
2582 if (object->IsFixedArray()) {
2583 Page* page = Page::FromAddress(object->address());
2584
2585 Address allocation_top = object->address() + object->Size();
2586 PrintF("large page 0x%x:\n", page);
2587 PrintRSetRange(page->RSetStart(), page->RSetEnd(),
2588 reinterpret_cast<Object**>(object->address()),
2589 allocation_top);
2590 int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
2591 int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
2592 kBitsPerInt);
2593 PrintF("------------------------------------------------------------"
2594 "-----------\n");
2595 PrintRSetRange(allocation_top,
2596 allocation_top + extra_rset_bits / kBitsPerByte,
2597 reinterpret_cast<Object**>(object->address()
2598 + Page::kObjectAreaSize),
2599 allocation_top);
2600 PrintF("\n");
2601 }
2602 }
2603}
2604#endif // DEBUG
2605
2606} } // namespace v8::internal