blob: 52f6391ee0e38682d26874e99e405c60683ce5d7 [file] [log] [blame]
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001// Copyright 2006-2008 Google Inc. All Rights Reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "macro-assembler.h"
31#include "mark-compact.h"
32#include "platform.h"
33
34namespace v8 { namespace internal {
35
36#ifdef DEBUG
37DECLARE_bool(heap_stats);
38DEFINE_bool(collect_heap_spill_statistics, false,
39 "report heap spill statistics along with heap_stats "
40 "(requires heap_stats)");
41#endif
42
43#ifdef ENABLE_LOGGING_AND_PROFILING
44DECLARE_bool(log_gc);
45#endif
46
47// For paged spaces, top and limit should always be in the same page and top
48// should not be greater than limit.
49#define ASSERT_PAGED_ALLOCATION_INFO(info) \
50 ASSERT((Page::FromAllocationTop((info).top) == \
51 Page::FromAllocationTop((info).limit)) \
52 &&((info).top <= (info).limit))
53
54
55// For contiguous spaces, top should be in the space (or at the end) and limit
56// should be the end of the space.
57#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
58 ASSERT((space)->low() <= (info).top \
59 && (info).top <= (space)->high() \
60 && (info).limit == (space)->high())
61
62// ----------------------------------------------------------------------------
63// SpaceIterator
64
65SpaceIterator::SpaceIterator() : current_space_(NEW_SPACE), iterator_(NULL) {
66 // SpaceIterator depends on AllocationSpace enumeration starts with NEW_SPACE.
67 ASSERT(NEW_SPACE == 0);
68}
69
70
71SpaceIterator::~SpaceIterator() {
72 // Delete active iterator if any.
73 if (iterator_ != NULL) delete iterator_;
74}
75
76
77bool SpaceIterator::has_next() {
78 // Iterate until no more spaces.
79 return current_space_ != LAST_SPACE;
80}
81
82
83ObjectIterator* SpaceIterator::next() {
84 if (iterator_ != NULL) {
85 delete iterator_;
86 iterator_ = NULL;
87 // Move to the next space
88 current_space_++;
89 if (current_space_ > LAST_SPACE) {
90 return NULL;
91 }
92 }
93
94 // Return iterator for the new current space.
95 return CreateIterator();
96}
97
98
99// Create an iterator for the space to iterate.
100ObjectIterator* SpaceIterator::CreateIterator() {
101 ASSERT(iterator_ == NULL);
102
103 switch (current_space_) {
104 case NEW_SPACE:
105 iterator_ = new SemiSpaceIterator(Heap::new_space());
106 break;
107 case OLD_SPACE:
108 iterator_ = new HeapObjectIterator(Heap::old_space());
109 break;
110 case CODE_SPACE:
111 iterator_ = new HeapObjectIterator(Heap::code_space());
112 break;
113 case MAP_SPACE:
114 iterator_ = new HeapObjectIterator(Heap::map_space());
115 break;
116 case LO_SPACE:
117 iterator_ = new LargeObjectIterator(Heap::lo_space());
118 break;
119 }
120
121 // Return the newly allocated iterator;
122 ASSERT(iterator_ != NULL);
123 return iterator_;
124}
125
126
127// ----------------------------------------------------------------------------
128// HeapObjectIterator
129
130HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
131 Initialize(space->bottom(), space->top(), NULL);
132}
133
134
135HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
136 HeapObjectCallback size_func) {
137 Initialize(space->bottom(), space->top(), size_func);
138}
139
140
141HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
142 Initialize(start, space->top(), NULL);
143}
144
145
146HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
147 HeapObjectCallback size_func) {
148 Initialize(start, space->top(), size_func);
149}
150
151
152void HeapObjectIterator::Initialize(Address cur, Address end,
153 HeapObjectCallback size_f) {
154 cur_addr_ = cur;
155 end_addr_ = end;
156 end_page_ = Page::FromAllocationTop(end);
157 size_func_ = size_f;
158 Page* p = Page::FromAllocationTop(cur_addr_);
159 cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
160
161#ifdef DEBUG
162 Verify();
163#endif
164}
165
166
167bool HeapObjectIterator::HasNextInNextPage() {
168 if (cur_addr_ == end_addr_) return false;
169
170 Page* cur_page = Page::FromAllocationTop(cur_addr_);
171 cur_page = cur_page->next_page();
172 ASSERT(cur_page->is_valid());
173
174 cur_addr_ = cur_page->ObjectAreaStart();
175 cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
176
177 ASSERT(cur_addr_ < cur_limit_);
178#ifdef DEBUG
179 Verify();
180#endif
181 return true;
182}
183
184
185#ifdef DEBUG
186void HeapObjectIterator::Verify() {
187 Page* p = Page::FromAllocationTop(cur_addr_);
188 ASSERT(p == Page::FromAllocationTop(cur_limit_));
189 ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
190}
191#endif
192
193
194// -----------------------------------------------------------------------------
195// PageIterator
196
197PageIterator::PageIterator(PagedSpace* space, Mode mode) {
198 cur_page_ = space->first_page_;
199 switch (mode) {
200 case PAGES_IN_USE:
201 stop_page_ = space->AllocationTopPage()->next_page();
202 break;
203 case PAGES_USED_BY_MC:
204 stop_page_ = space->MCRelocationTopPage()->next_page();
205 break;
206 case ALL_PAGES:
207 stop_page_ = Page::FromAddress(NULL);
208 break;
209 default:
210 UNREACHABLE();
211 }
212}
213
214
215// -----------------------------------------------------------------------------
216// Page
217
218#ifdef DEBUG
219Page::RSetState Page::rset_state_ = Page::IN_USE;
220#endif
221
222// -----------------------------------------------------------------------------
223// MemoryAllocator
224//
225int MemoryAllocator::capacity_ = 0;
226int MemoryAllocator::size_ = 0;
227
228VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
229
230// 270 is an estimate based on the static default heap size of a pair of 256K
231// semispaces and a 64M old generation.
232const int kEstimatedNumberOfChunks = 270;
233List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
234 kEstimatedNumberOfChunks);
235List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
236int MemoryAllocator::max_nof_chunks_ = 0;
237int MemoryAllocator::top_ = 0;
238
239
240void MemoryAllocator::Push(int free_chunk_id) {
241 ASSERT(max_nof_chunks_ > 0);
242 ASSERT(top_ < max_nof_chunks_);
243 free_chunk_ids_[top_++] = free_chunk_id;
244}
245
246
247int MemoryAllocator::Pop() {
248 ASSERT(top_ > 0);
249 return free_chunk_ids_[--top_];
250}
251
252
253bool MemoryAllocator::Setup(int capacity) {
254 capacity_ = RoundUp(capacity, Page::kPageSize);
255
256 // Over-estimate the size of chunks_ array. It assumes the expansion of old
257 // space is always in the unit of a chunk (kChunkSize) except the last
258 // expansion.
259 //
260 // Due to alignment, allocated space might be one page less than required
261 // number (kPagesPerChunk) of pages for old spaces.
262 //
263 // Reserve two chunk ids for semispaces, one for map space and one for old
264 // space.
265 max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 4;
266 if (max_nof_chunks_ > kMaxNofChunks) return false;
267
268 size_ = 0;
269 ChunkInfo info; // uninitialized element.
270 for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
271 chunks_.Add(info);
272 free_chunk_ids_.Add(i);
273 }
274 top_ = max_nof_chunks_;
275 return true;
276}
277
278
279void MemoryAllocator::TearDown() {
280 for (int i = 0; i < max_nof_chunks_; i++) {
281 if (chunks_[i].address() != NULL) DeleteChunk(i);
282 }
283 chunks_.Clear();
284 free_chunk_ids_.Clear();
285
286 if (initial_chunk_ != NULL) {
287 LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
288 delete initial_chunk_;
289 initial_chunk_ = NULL;
290 }
291
292 ASSERT(top_ == max_nof_chunks_); // all chunks are free
293 top_ = 0;
294 capacity_ = 0;
295 size_ = 0;
296 max_nof_chunks_ = 0;
297}
298
299
300void* MemoryAllocator::AllocateRawMemory(const size_t requested,
301 size_t* allocated) {
302 if (size_ + static_cast<int>(requested) > capacity_) return NULL;
303
304 void* mem = OS::Allocate(requested, allocated);
305 int alloced = *allocated;
306 size_ += alloced;
307 Counters::memory_allocated.Increment(alloced);
308 return mem;
309}
310
311
312void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
313 OS::Free(mem, length);
314 Counters::memory_allocated.Decrement(length);
315 size_ -= length;
316 ASSERT(size_ >= 0);
317}
318
319
320void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
321 ASSERT(initial_chunk_ == NULL);
322
323 initial_chunk_ = new VirtualMemory(requested);
324 CHECK(initial_chunk_ != NULL);
325 if (!initial_chunk_->IsReserved()) {
326 delete initial_chunk_;
327 initial_chunk_ = NULL;
328 return NULL;
329 }
330
331 // We are sure that we have mapped a block of requested addresses.
332 ASSERT(initial_chunk_->size() == requested);
333 LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
334 size_ += requested;
335 return initial_chunk_->address();
336}
337
338
339static int PagesInChunk(Address start, size_t size) {
340 // The first page starts on the first page-aligned address from start onward
341 // and the last page ends on the last page-aligned address before
342 // start+size. Page::kPageSize is a power of two so we can divide by
343 // shifting.
344 return (RoundDown(start + size, Page::kPageSize)
345 - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
346}
347
348
349Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
350 PagedSpace* owner) {
351 if (requested_pages <= 0) return Page::FromAddress(NULL);
352 size_t chunk_size = requested_pages * Page::kPageSize;
353
354 // There is not enough space to guarantee the desired number pages can be
355 // allocated.
356 if (size_ + static_cast<int>(chunk_size) > capacity_) {
357 // Request as many pages as we can.
358 chunk_size = capacity_ - size_;
359 requested_pages = chunk_size >> Page::kPageSizeBits;
360
361 if (requested_pages <= 0) return Page::FromAddress(NULL);
362 }
363
364 void* chunk = AllocateRawMemory(chunk_size, &chunk_size);
365 if (chunk == NULL) return Page::FromAddress(NULL);
366 LOG(NewEvent("PagedChunk", chunk, chunk_size));
367
368 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
369 if (*allocated_pages == 0) {
370 FreeRawMemory(chunk, chunk_size);
371 LOG(DeleteEvent("PagedChunk", chunk));
372 return Page::FromAddress(NULL);
373 }
374
375 int chunk_id = Pop();
376 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
377
378 return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
379}
380
381
382Page* MemoryAllocator::CommitPages(Address start, size_t size,
383 PagedSpace* owner, int* num_pages) {
384 ASSERT(start != NULL);
385 *num_pages = PagesInChunk(start, size);
386 ASSERT(*num_pages > 0);
387 ASSERT(initial_chunk_ != NULL);
388 ASSERT(initial_chunk_->address() <= start);
389 ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
390 + initial_chunk_->size());
391
392 if (!initial_chunk_->Commit(start, size)) {
393 return Page::FromAddress(NULL);
394 }
395 Counters::memory_allocated.Increment(size);
396
397 // So long as we correctly overestimated the number of chunks we should not
398 // run out of chunk ids.
399 CHECK(!OutOfChunkIds());
400 int chunk_id = Pop();
401 chunks_[chunk_id].init(start, size, owner);
402 return InitializePagesInChunk(chunk_id, *num_pages, owner);
403}
404
405
406bool MemoryAllocator::CommitBlock(Address start, size_t size) {
407 ASSERT(start != NULL);
408 ASSERT(size > 0);
409 ASSERT(initial_chunk_ != NULL);
410 ASSERT(initial_chunk_->address() <= start);
411 ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
412 + initial_chunk_->size());
413
414 if (!initial_chunk_->Commit(start, size)) return false;
415 Counters::memory_allocated.Increment(size);
416 return true;
417}
418
419
420Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
421 PagedSpace* owner) {
422 ASSERT(IsValidChunk(chunk_id));
423 ASSERT(pages_in_chunk > 0);
424
425 Address chunk_start = chunks_[chunk_id].address();
426
427 Address low = RoundUp(chunk_start, Page::kPageSize);
428
429#ifdef DEBUG
430 size_t chunk_size = chunks_[chunk_id].size();
431 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
432 ASSERT(pages_in_chunk <=
433 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
434#endif
435
436 Address page_addr = low;
437 for (int i = 0; i < pages_in_chunk; i++) {
438 Page* p = Page::FromAddress(page_addr);
439 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
440 p->is_normal_page = 1;
441 page_addr += Page::kPageSize;
442 }
443
444 // Set the next page of the last page to 0.
445 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
446 last_page->opaque_header = OffsetFrom(0) | chunk_id;
447
448 return Page::FromAddress(low);
449}
450
451
452Page* MemoryAllocator::FreePages(Page* p) {
453 if (!p->is_valid()) return p;
454
455 // Find the first page in the same chunk as 'p'
456 Page* first_page = FindFirstPageInSameChunk(p);
457 Page* page_to_return = Page::FromAddress(NULL);
458
459 if (p != first_page) {
460 // Find the last page in the same chunk as 'prev'.
461 Page* last_page = FindLastPageInSameChunk(p);
462 first_page = GetNextPage(last_page); // first page in next chunk
463
464 // set the next_page of last_page to NULL
465 SetNextPage(last_page, Page::FromAddress(NULL));
466 page_to_return = p; // return 'p' when exiting
467 }
468
469 while (first_page->is_valid()) {
470 int chunk_id = GetChunkId(first_page);
471 ASSERT(IsValidChunk(chunk_id));
472
473 // Find the first page of the next chunk before deleting this chunk.
474 first_page = GetNextPage(FindLastPageInSameChunk(first_page));
475
476 // Free the current chunk.
477 DeleteChunk(chunk_id);
478 }
479
480 return page_to_return;
481}
482
483
484void MemoryAllocator::DeleteChunk(int chunk_id) {
485 ASSERT(IsValidChunk(chunk_id));
486
487 ChunkInfo& c = chunks_[chunk_id];
488
489 // We cannot free a chunk contained in the initial chunk because it was not
490 // allocated with AllocateRawMemory. Instead we uncommit the virtual
491 // memory.
492 bool in_initial_chunk = false;
493 if (initial_chunk_ != NULL) {
494 Address start = static_cast<Address>(initial_chunk_->address());
495 Address end = start + initial_chunk_->size();
496 in_initial_chunk = (start <= c.address()) && (c.address() < end);
497 }
498
499 if (in_initial_chunk) {
500 // TODO(1240712): VirtualMemory::Uncommit has a return value which
501 // is ignored here.
502 initial_chunk_->Uncommit(c.address(), c.size());
503 Counters::memory_allocated.Decrement(c.size());
504 } else {
505 LOG(DeleteEvent("PagedChunk", c.address()));
506 FreeRawMemory(c.address(), c.size());
507 }
508 c.init(NULL, 0, NULL);
509 Push(chunk_id);
510}
511
512
513Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
514 int chunk_id = GetChunkId(p);
515 ASSERT(IsValidChunk(chunk_id));
516
517 Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
518 return Page::FromAddress(low);
519}
520
521
522Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
523 int chunk_id = GetChunkId(p);
524 ASSERT(IsValidChunk(chunk_id));
525
526 Address chunk_start = chunks_[chunk_id].address();
527 size_t chunk_size = chunks_[chunk_id].size();
528
529 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
530 ASSERT(chunk_start <= p->address() && p->address() < high);
531
532 return Page::FromAddress(high - Page::kPageSize);
533}
534
535
536#ifdef DEBUG
537void MemoryAllocator::ReportStatistics() {
538 float pct = static_cast<float>(capacity_ - size_) / capacity_;
539 PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
540 capacity_, size_, static_cast<int>(pct*100));
541}
542#endif
543
544
545// -----------------------------------------------------------------------------
546// PagedSpace implementation
547
548PagedSpace::PagedSpace(int max_capacity, AllocationSpace id) {
549 ASSERT(id == OLD_SPACE || id == CODE_SPACE || id == MAP_SPACE);
550 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
551 * Page::kObjectAreaSize;
552 identity_ = id;
553 accounting_stats_.Clear();
554
555 allocation_mode_ = LINEAR;
556
557 allocation_info_.top = NULL;
558 allocation_info_.limit = NULL;
559
560 mc_forwarding_info_.top = NULL;
561 mc_forwarding_info_.limit = NULL;
562}
563
564
565bool PagedSpace::Setup(Address start, size_t size) {
566 if (HasBeenSetup()) return false;
567
568 int num_pages = 0;
569 // Try to use the virtual memory range passed to us. If it is too small to
570 // contain at least one page, ignore it and allocate instead.
571 if (PagesInChunk(start, size) > 0) {
572 first_page_ = MemoryAllocator::CommitPages(start, size, this, &num_pages);
573 } else {
574 int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
575 max_capacity_ / Page::kObjectAreaSize);
576 first_page_ =
577 MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
578 if (!first_page_->is_valid()) return false;
579 }
580
581 // We are sure that the first page is valid and that we have at least one
582 // page.
583 ASSERT(first_page_->is_valid());
584 ASSERT(num_pages > 0);
585 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
586 ASSERT(Capacity() <= max_capacity_);
587
588 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
589 p->ClearRSet();
590 }
591
592 // Use first_page_ for allocation.
593 SetAllocationInfo(&allocation_info_, first_page_);
594
595 return true;
596}
597
598
599bool PagedSpace::HasBeenSetup() {
600 return (Capacity() > 0);
601}
602
603
604void PagedSpace::TearDown() {
605 first_page_ = MemoryAllocator::FreePages(first_page_);
606 ASSERT(!first_page_->is_valid());
607
608 accounting_stats_.Clear();
609}
610
611
612void PagedSpace::ClearRSet() {
613 PageIterator it(this, PageIterator::ALL_PAGES);
614 while (it.has_next()) {
615 it.next()->ClearRSet();
616 }
617}
618
619
620Object* PagedSpace::FindObject(Address addr) {
621#ifdef DEBUG
622 // Note: this function can only be called before or after mark-compact GC
623 // because it accesses map pointers.
624 ASSERT(!MarkCompactCollector::in_use());
625#endif
626
627 if (!Contains(addr)) return Failure::Exception();
628
629 Page* p = Page::FromAddress(addr);
630 Address cur = p->ObjectAreaStart();
631 Address end = p->AllocationTop();
632 while (cur < end) {
633 HeapObject* obj = HeapObject::FromAddress(cur);
634 Address next = cur + obj->Size();
635 if ((cur <= addr) && (addr < next)) return obj;
636 cur = next;
637 }
638
639 return Failure::Exception();
640}
641
642
643void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
644 alloc_info->top = p->ObjectAreaStart();
645 alloc_info->limit = p->ObjectAreaEnd();
646 ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
647}
648
649
650void PagedSpace::MCResetRelocationInfo() {
651 // Set page indexes.
652 int i = 0;
653 PageIterator it(this, PageIterator::ALL_PAGES);
654 while (it.has_next()) {
655 Page* p = it.next();
656 p->mc_page_index = i++;
657 }
658
659 // Set mc_forwarding_info_ to the first page in the space.
660 SetAllocationInfo(&mc_forwarding_info_, first_page_);
661 // All the bytes in the space are 'available'. We will rediscover
662 // allocated and wasted bytes during GC.
663 accounting_stats_.Reset();
664}
665
666
667void PagedSpace::SetLinearAllocationOnly(bool linear_only) {
668 if (linear_only) {
669 // Note that the free_list is not cleared. If we switch back to
670 // FREE_LIST mode it will be available for use. Resetting it
671 // requires correct accounting for the wasted bytes.
672 allocation_mode_ = LINEAR_ONLY;
673 } else {
674 ASSERT(allocation_mode_ == LINEAR_ONLY);
675 allocation_mode_ = LINEAR;
676 }
677}
678
679
680int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
681#ifdef DEBUG
682 // The Contains function considers the address at the beginning of a
683 // page in the page, MCSpaceOffsetForAddress considers it is in the
684 // previous page.
685 if (Page::IsAlignedToPageSize(addr)) {
686 ASSERT(Contains(addr - kPointerSize));
687 } else {
688 ASSERT(Contains(addr));
689 }
690#endif
691
692 // If addr is at the end of a page, it belongs to previous page
693 Page* p = Page::IsAlignedToPageSize(addr)
694 ? Page::FromAllocationTop(addr)
695 : Page::FromAddress(addr);
696 int index = p->mc_page_index;
697 return (index * Page::kPageSize) + p->Offset(addr);
698}
699
700
701bool PagedSpace::Expand(Page* last_page) {
702 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
703 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
704
705 if (Capacity() == max_capacity_) return false;
706
707 ASSERT(Capacity() < max_capacity_);
708 // Last page must be valid and its next page is invalid.
709 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
710
711 int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
712 if (available_pages <= 0) return false;
713
714 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
715 Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
716 if (!p->is_valid()) return false;
717
718 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
719 ASSERT(Capacity() <= max_capacity_);
720
721 MemoryAllocator::SetNextPage(last_page, p);
722
723 // Clear remembered set of new pages.
724 while (p->is_valid()) {
725 p->ClearRSet();
726 p = p->next_page();
727 }
728
729 return true;
730}
731
732
733#ifdef DEBUG
734int PagedSpace::CountTotalPages() {
735 int count = 0;
736 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
737 count++;
738 }
739 return count;
740}
741#endif
742
743
744void PagedSpace::Shrink() {
745 // Release half of free pages.
746 Page* top_page = AllocationTopPage();
747 ASSERT(top_page->is_valid());
748
749 // Loop over the pages from the top page to the end of the space to count
750 // the number of pages to keep and find the last page to keep.
751 int free_pages = 0;
752 int pages_to_keep = 0; // Of the free pages.
753 Page* last_page_to_keep = top_page;
754 Page* current_page = top_page->next_page();
755 // Loop over the pages to the end of the space.
756 while (current_page->is_valid()) {
757 // Keep every odd-numbered page, one page for every two in the space.
758 if ((free_pages & 0x1) == 1) {
759 pages_to_keep++;
760 last_page_to_keep = last_page_to_keep->next_page();
761 }
762 free_pages++;
763 current_page = current_page->next_page();
764 }
765
766 // Free pages after last_page_to_keep, and adjust the next_page link.
767 Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
768 MemoryAllocator::SetNextPage(last_page_to_keep, p);
769
770 // Since pages are only freed in whole chunks, we may have kept more than
771 // pages_to_keep.
772 while (p->is_valid()) {
773 pages_to_keep++;
774 p = p->next_page();
775 }
776
777 // The difference between free_pages and pages_to_keep is the number of
778 // pages actually freed.
779 ASSERT(pages_to_keep <= free_pages);
780 int bytes_freed = (free_pages - pages_to_keep) * Page::kObjectAreaSize;
781 accounting_stats_.ShrinkSpace(bytes_freed);
782
783 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
784}
785
786
787bool PagedSpace::EnsureCapacity(int capacity) {
788 if (Capacity() >= capacity) return true;
789
790 // Start from the allocation top and loop to the last page in the space.
791 Page* last_page = AllocationTopPage();
792 Page* next_page = last_page->next_page();
793 while (next_page->is_valid()) {
794 last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
795 next_page = last_page->next_page();
796 }
797
798 // Expand the space until it has the required capacity or expansion fails.
799 do {
800 if (!Expand(last_page)) return false;
801 ASSERT(last_page->next_page()->is_valid());
802 last_page =
803 MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
804 } while (Capacity() < capacity);
805
806 return true;
807}
808
809
810#ifdef DEBUG
811void PagedSpace::Print() { }
812#endif
813
814
815// -----------------------------------------------------------------------------
816// NewSpace implementation
817
818NewSpace::NewSpace(int initial_semispace_capacity,
819 int maximum_semispace_capacity) {
820 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
821 ASSERT(IsPowerOf2(maximum_semispace_capacity));
822 maximum_capacity_ = maximum_semispace_capacity;
823 capacity_ = initial_semispace_capacity;
824 to_space_ = new SemiSpace(capacity_, maximum_capacity_);
825 from_space_ = new SemiSpace(capacity_, maximum_capacity_);
826
827 // Allocate and setup the histogram arrays if necessary.
828#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
829 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
830 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
831
832#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
833 promoted_histogram_[name].set_name(#name);
834 INSTANCE_TYPE_LIST(SET_NAME)
835#undef SET_NAME
836#endif
837}
838
839
840bool NewSpace::Setup(Address start, int size) {
841 ASSERT(size == 2 * maximum_capacity_);
842 ASSERT(IsAddressAligned(start, size, 0));
843
844 if (to_space_ == NULL
845 || !to_space_->Setup(start, maximum_capacity_)) {
846 return false;
847 }
848 if (from_space_ == NULL
849 || !from_space_->Setup(start + maximum_capacity_, maximum_capacity_)) {
850 return false;
851 }
852
853 start_ = start;
854 address_mask_ = ~(size - 1);
855 object_mask_ = address_mask_ | kHeapObjectTag;
856 object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;
857
858 allocation_info_.top = to_space_->low();
859 allocation_info_.limit = to_space_->high();
860 mc_forwarding_info_.top = NULL;
861 mc_forwarding_info_.limit = NULL;
862
863 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
864 return true;
865}
866
867
868void NewSpace::TearDown() {
869#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
870 if (allocated_histogram_) {
871 DeleteArray(allocated_histogram_);
872 allocated_histogram_ = NULL;
873 }
874 if (promoted_histogram_) {
875 DeleteArray(promoted_histogram_);
876 promoted_histogram_ = NULL;
877 }
878#endif
879
880 start_ = NULL;
881 capacity_ = 0;
882 allocation_info_.top = NULL;
883 allocation_info_.limit = NULL;
884 mc_forwarding_info_.top = NULL;
885 mc_forwarding_info_.limit = NULL;
886
887 if (to_space_ != NULL) {
888 to_space_->TearDown();
889 delete to_space_;
890 to_space_ = NULL;
891 }
892
893 if (from_space_ != NULL) {
894 from_space_->TearDown();
895 delete from_space_;
896 from_space_ = NULL;
897 }
898}
899
900
901void NewSpace::Flip() {
902 SemiSpace* tmp = from_space_;
903 from_space_ = to_space_;
904 to_space_ = tmp;
905}
906
907
908bool NewSpace::Double() {
909 ASSERT(capacity_ <= maximum_capacity_ / 2);
910 // TODO(1240712): Failure to double the from space can result in
911 // semispaces of different sizes. In the event of that failure, the
912 // to space doubling should be rolled back before returning false.
913 if (!to_space_->Double() || !from_space_->Double()) return false;
914 capacity_ *= 2;
915 allocation_info_.limit = to_space_->high();
916 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
917 return true;
918}
919
920
921void NewSpace::ResetAllocationInfo() {
922 allocation_info_.top = to_space_->low();
923 allocation_info_.limit = to_space_->high();
924 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
925}
926
927
928void NewSpace::MCResetRelocationInfo() {
929 mc_forwarding_info_.top = from_space_->low();
930 mc_forwarding_info_.limit = from_space_->high();
931 ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
932}
933
934
935void NewSpace::MCCommitRelocationInfo() {
936 // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
937 // valid allocation info for the to space.
938 allocation_info_.top = mc_forwarding_info_.top;
939 allocation_info_.limit = to_space_->high();
940 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
941}
942
943
944#ifdef DEBUG
945// We do not use the SemispaceIterator because verification doesn't assume
946// that it works (it depends on the invariants we are checking).
947void NewSpace::Verify() {
948 // The allocation pointer should be in the space or at the very end.
949 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
950
951 // There should be objects packed in from the low address up to the
952 // allocation pointer.
953 Address current = to_space_->low();
954 while (current < top()) {
955 HeapObject* object = HeapObject::FromAddress(current);
956
957 // The first word should be a map, and we expect all map pointers to
958 // be in map space.
959 Map* map = object->map();
960 ASSERT(map->IsMap());
961 ASSERT(Heap::map_space()->Contains(map));
962
963 // The object should not be code or a map.
964 ASSERT(!object->IsMap());
965 ASSERT(!object->IsCode());
966
967 // The object itself should look OK.
968 object->Verify();
969
970 // All the interior pointers should be contained in the heap.
971 VerifyPointersVisitor visitor;
972 int size = object->Size();
973 object->IterateBody(map->instance_type(), size, &visitor);
974
975 current += size;
976 }
977
978 // The allocation pointer should not be in the middle of an object.
979 ASSERT(current == top());
980}
981#endif
982
983
984// -----------------------------------------------------------------------------
985// SemiSpace implementation
986
987SemiSpace::SemiSpace(int initial_capacity, int maximum_capacity)
988 : capacity_(initial_capacity), maximum_capacity_(maximum_capacity),
989 start_(NULL), age_mark_(NULL) {
990}
991
992
993bool SemiSpace::Setup(Address start, int size) {
994 ASSERT(size == maximum_capacity_);
995 if (!MemoryAllocator::CommitBlock(start, capacity_)) return false;
996
997 start_ = start;
998 address_mask_ = ~(size - 1);
999 object_mask_ = address_mask_ | kHeapObjectTag;
1000 object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;
1001
1002 age_mark_ = start_;
1003 return true;
1004}
1005
1006
1007void SemiSpace::TearDown() {
1008 start_ = NULL;
1009 capacity_ = 0;
1010}
1011
1012
1013bool SemiSpace::Double() {
1014 if (!MemoryAllocator::CommitBlock(high(), capacity_)) return false;
1015 capacity_ *= 2;
1016 return true;
1017}
1018
1019
1020#ifdef DEBUG
1021void SemiSpace::Print() { }
1022#endif
1023
1024
1025// -----------------------------------------------------------------------------
1026// SemiSpaceIterator implementation.
1027SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1028 Initialize(space, space->bottom(), space->top(), NULL);
1029}
1030
1031
1032SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1033 HeapObjectCallback size_func) {
1034 Initialize(space, space->bottom(), space->top(), size_func);
1035}
1036
1037
1038SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1039 Initialize(space, start, space->top(), NULL);
1040}
1041
1042
1043void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
1044 Address end,
1045 HeapObjectCallback size_func) {
1046 ASSERT(space->ToSpaceContains(start));
1047 ASSERT(space->ToSpaceLow() <= end
1048 && end <= space->ToSpaceHigh());
1049 space_ = space->to_space_;
1050 current_ = start;
1051 limit_ = end;
1052 size_func_ = size_func;
1053}
1054
1055
1056#ifdef DEBUG
1057// A static array of histogram info for each type.
1058static HistogramInfo heap_histograms[LAST_TYPE+1];
1059static JSObject::SpillInformation js_spill_information;
1060
1061// heap_histograms is shared, always clear it before using it.
1062static void ClearHistograms() {
1063 // We reset the name each time, though it hasn't changed.
1064#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
1065 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1066#undef DEF_TYPE_NAME
1067
1068#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
1069 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1070#undef CLEAR_HISTOGRAM
1071
1072 js_spill_information.Clear();
1073}
1074
1075
1076static int code_kind_statistics[Code::NUMBER_OF_KINDS];
1077
1078
1079static void ClearCodeKindStatistics() {
1080 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1081 code_kind_statistics[i] = 0;
1082 }
1083}
1084
1085
1086static void ReportCodeKindStatistics() {
1087 const char* table[Code::NUMBER_OF_KINDS];
1088
1089#define CASE(name) \
1090 case Code::name: table[Code::name] = #name; \
1091 break
1092
1093 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1094 switch (static_cast<Code::Kind>(i)) {
1095 CASE(FUNCTION);
1096 CASE(STUB);
1097 CASE(BUILTIN);
1098 CASE(LOAD_IC);
1099 CASE(KEYED_LOAD_IC);
1100 CASE(STORE_IC);
1101 CASE(KEYED_STORE_IC);
1102 CASE(CALL_IC);
1103 }
1104 }
1105
1106#undef CASE
1107
1108 PrintF("\n Code kind histograms: \n");
1109 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1110 if (code_kind_statistics[i] > 0) {
1111 PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
1112 }
1113 }
1114 PrintF("\n");
1115}
1116
1117
1118static int CollectHistogramInfo(HeapObject* obj) {
1119 InstanceType type = obj->map()->instance_type();
1120 ASSERT(0 <= type && type <= LAST_TYPE);
1121 ASSERT(heap_histograms[type].name() != NULL);
1122 heap_histograms[type].increment_number(1);
1123 heap_histograms[type].increment_bytes(obj->Size());
1124
1125 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1126 JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
1127 }
1128
1129 return obj->Size();
1130}
1131
1132
1133static void ReportHistogram(bool print_spill) {
1134 PrintF("\n Object Histogram:\n");
1135 for (int i = 0; i <= LAST_TYPE; i++) {
1136 if (heap_histograms[i].number() > 0) {
1137 PrintF(" %-33s%10d (%10d bytes)\n",
1138 heap_histograms[i].name(),
1139 heap_histograms[i].number(),
1140 heap_histograms[i].bytes());
1141 }
1142 }
1143 PrintF("\n");
1144
1145 // Summarize string types.
1146 int string_number = 0;
1147 int string_bytes = 0;
1148#define INCREMENT(type, size, name) \
1149 string_number += heap_histograms[type].number(); \
1150 string_bytes += heap_histograms[type].bytes();
1151 STRING_TYPE_LIST(INCREMENT)
1152#undef INCREMENT
1153 if (string_number > 0) {
1154 PrintF(" %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1155 string_bytes);
1156 }
1157
1158 if (FLAG_collect_heap_spill_statistics && print_spill) {
1159 js_spill_information.Print();
1160 }
1161}
1162#endif // DEBUG
1163
1164
1165// Support for statistics gathering for --heap-stats and --log-gc.
1166#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1167void NewSpace::ClearHistograms() {
1168 for (int i = 0; i <= LAST_TYPE; i++) {
1169 allocated_histogram_[i].clear();
1170 promoted_histogram_[i].clear();
1171 }
1172}
1173
1174// Because the copying collector does not touch garbage objects, we iterate
1175// the new space before a collection to get a histogram of allocated objects.
1176// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1177// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1178// flag is set.
1179void NewSpace::CollectStatistics() {
1180 ClearHistograms();
1181 SemiSpaceIterator it(this);
1182 while (it.has_next()) RecordAllocation(it.next());
1183}
1184
1185
1186#ifdef ENABLE_LOGGING_AND_PROFILING
1187static void DoReportStatistics(HistogramInfo* info, const char* description) {
1188 LOG(HeapSampleBeginEvent("NewSpace", description));
1189 // Lump all the string types together.
1190 int string_number = 0;
1191 int string_bytes = 0;
1192#define INCREMENT(type, size, name) \
1193 string_number += info[type].number(); \
1194 string_bytes += info[type].bytes();
1195 STRING_TYPE_LIST(INCREMENT)
1196#undef INCREMENT
1197 if (string_number > 0) {
1198 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1199 }
1200
1201 // Then do the other types.
1202 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1203 if (info[i].number() > 0) {
1204 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
1205 info[i].bytes()));
1206 }
1207 }
1208 LOG(HeapSampleEndEvent("NewSpace", description));
1209}
1210#endif // ENABLE_LOGGING_AND_PROFILING
1211
1212
1213void NewSpace::ReportStatistics() {
1214#ifdef DEBUG
1215 if (FLAG_heap_stats) {
1216 float pct = static_cast<float>(Available()) / Capacity();
1217 PrintF(" capacity: %d, available: %d, %%%d\n",
1218 Capacity(), Available(), static_cast<int>(pct*100));
1219 PrintF("\n Object Histogram:\n");
1220 for (int i = 0; i <= LAST_TYPE; i++) {
1221 if (allocated_histogram_[i].number() > 0) {
1222 PrintF(" %-33s%10d (%10d bytes)\n",
1223 allocated_histogram_[i].name(),
1224 allocated_histogram_[i].number(),
1225 allocated_histogram_[i].bytes());
1226 }
1227 }
1228 PrintF("\n");
1229 }
1230#endif // DEBUG
1231
1232#ifdef ENABLE_LOGGING_AND_PROFILING
1233 if (FLAG_log_gc) {
1234 DoReportStatistics(allocated_histogram_, "allocated");
1235 DoReportStatistics(promoted_histogram_, "promoted");
1236 }
1237#endif // ENABLE_LOGGING_AND_PROFILING
1238}
1239
1240
1241void NewSpace::RecordAllocation(HeapObject* obj) {
1242 InstanceType type = obj->map()->instance_type();
1243 ASSERT(0 <= type && type <= LAST_TYPE);
1244 allocated_histogram_[type].increment_number(1);
1245 allocated_histogram_[type].increment_bytes(obj->Size());
1246}
1247
1248
1249void NewSpace::RecordPromotion(HeapObject* obj) {
1250 InstanceType type = obj->map()->instance_type();
1251 ASSERT(0 <= type && type <= LAST_TYPE);
1252 promoted_histogram_[type].increment_number(1);
1253 promoted_histogram_[type].increment_bytes(obj->Size());
1254}
1255#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1256
1257
1258// -----------------------------------------------------------------------------
1259// Free lists for old object spaces implementation
1260
1261void FreeListNode::set_size(int size_in_bytes) {
1262 ASSERT(size_in_bytes > 0);
1263 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1264
1265 // We write a map and possibly size information to the block. If the block
1266 // is big enough to be a ByteArray with at least one extra word (the next
1267 // pointer), we set its map to be the byte array map and its size to an
1268 // appropriate array length for the desired size from HeapObject::Size().
1269 // If the block is too small (eg, one or two words), to hold both a size
1270 // field and a next pointer, we give it a filler map that gives it the
1271 // correct size.
1272 if (size_in_bytes > Array::kHeaderSize) {
1273 set_map(Heap::byte_array_map());
1274 ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
1275 } else if (size_in_bytes == kPointerSize) {
1276 set_map(Heap::one_word_filler_map());
1277 } else if (size_in_bytes == 2 * kPointerSize) {
1278 set_map(Heap::two_word_filler_map());
1279 } else {
1280 UNREACHABLE();
1281 }
1282}
1283
1284
1285Address FreeListNode::next() {
1286 ASSERT(map() == Heap::byte_array_map());
1287 return Memory::Address_at(address() + kNextOffset);
1288}
1289
1290
1291void FreeListNode::set_next(Address next) {
1292 ASSERT(map() == Heap::byte_array_map());
1293 Memory::Address_at(address() + kNextOffset) = next;
1294}
1295
1296
1297OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
1298 Reset();
1299}
1300
1301
1302void OldSpaceFreeList::Reset() {
1303 available_ = 0;
1304 for (int i = 0; i < kFreeListsLength; i++) {
1305 free_[i].head_node_ = NULL;
1306 }
1307 needs_rebuild_ = false;
1308 finger_ = kHead;
1309 free_[kHead].next_size_ = kEnd;
1310}
1311
1312
1313void OldSpaceFreeList::RebuildSizeList() {
1314 ASSERT(needs_rebuild_);
1315 int cur = kHead;
1316 for (int i = cur + 1; i < kFreeListsLength; i++) {
1317 if (free_[i].head_node_ != NULL) {
1318 free_[cur].next_size_ = i;
1319 cur = i;
1320 }
1321 }
1322 free_[cur].next_size_ = kEnd;
1323 needs_rebuild_ = false;
1324}
1325
1326
1327int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1328#ifdef DEBUG
1329 for (int i = 0; i < size_in_bytes; i += kPointerSize) {
1330 Memory::Address_at(start + i) = kZapValue;
1331 }
1332#endif
1333 FreeListNode* node = FreeListNode::FromAddress(start);
1334 node->set_size(size_in_bytes);
1335
1336 // Early return to drop too-small blocks on the floor (one or two word
1337 // blocks cannot hold a map pointer, a size field, and a pointer to the
1338 // next block in the free list).
1339 if (size_in_bytes < kMinBlockSize) {
1340 return size_in_bytes;
1341 }
1342
1343 // Insert other blocks at the head of an exact free list.
1344 int index = size_in_bytes >> kPointerSizeLog2;
1345 node->set_next(free_[index].head_node_);
1346 free_[index].head_node_ = node->address();
1347 available_ += size_in_bytes;
1348 needs_rebuild_ = true;
1349 return 0;
1350}
1351
1352
1353Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
1354 ASSERT(0 < size_in_bytes);
1355 ASSERT(size_in_bytes <= kMaxBlockSize);
1356 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1357
1358 if (needs_rebuild_) RebuildSizeList();
1359 int index = size_in_bytes >> kPointerSizeLog2;
1360 // Check for a perfect fit.
1361 if (free_[index].head_node_ != NULL) {
1362 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1363 // If this was the last block of its size, remove the size.
1364 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
1365 available_ -= size_in_bytes;
1366 *wasted_bytes = 0;
1367 return node;
1368 }
1369 // Search the size list for the best fit.
1370 int prev = finger_ < index ? finger_ : kHead;
1371 int cur = FindSize(index, &prev);
1372 ASSERT(index < cur);
1373 if (cur == kEnd) {
1374 // No large enough size in list.
1375 *wasted_bytes = 0;
1376 return Failure::RetryAfterGC(size_in_bytes, owner_);
1377 }
1378 int rem = cur - index;
1379 int rem_bytes = rem << kPointerSizeLog2;
1380 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
1381 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1382 size_in_bytes);
1383 // Distinguish the cases prev < rem < cur and rem <= prev < cur
1384 // to avoid many redundant tests and calls to Insert/RemoveSize.
1385 if (prev < rem) {
1386 // Simple case: insert rem between prev and cur.
1387 finger_ = prev;
1388 free_[prev].next_size_ = rem;
1389 // If this was the last block of size cur, remove the size.
1390 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1391 free_[rem].next_size_ = free_[cur].next_size_;
1392 } else {
1393 free_[rem].next_size_ = cur;
1394 }
1395 // Add the remainder block.
1396 rem_node->set_size(rem_bytes);
1397 rem_node->set_next(free_[rem].head_node_);
1398 free_[rem].head_node_ = rem_node->address();
1399 } else {
1400 // If this was the last block of size cur, remove the size.
1401 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1402 finger_ = prev;
1403 free_[prev].next_size_ = free_[cur].next_size_;
1404 }
1405 if (rem_bytes < kMinBlockSize) {
1406 // Too-small remainder is wasted.
1407 rem_node->set_size(rem_bytes);
1408 available_ -= size_in_bytes + rem_bytes;
1409 *wasted_bytes = rem_bytes;
1410 return cur_node;
1411 }
1412 // Add the remainder block and, if needed, insert its size.
1413 rem_node->set_size(rem_bytes);
1414 rem_node->set_next(free_[rem].head_node_);
1415 free_[rem].head_node_ = rem_node->address();
1416 if (rem_node->next() == NULL) InsertSize(rem);
1417 }
1418 available_ -= size_in_bytes;
1419 *wasted_bytes = 0;
1420 return cur_node;
1421}
1422
1423
1424MapSpaceFreeList::MapSpaceFreeList() {
1425 Reset();
1426}
1427
1428
1429void MapSpaceFreeList::Reset() {
1430 available_ = 0;
1431 head_ = NULL;
1432}
1433
1434
1435void MapSpaceFreeList::Free(Address start) {
1436#ifdef DEBUG
1437 for (int i = 0; i < Map::kSize; i += kPointerSize) {
1438 Memory::Address_at(start + i) = kZapValue;
1439 }
1440#endif
1441 FreeListNode* node = FreeListNode::FromAddress(start);
1442 node->set_size(Map::kSize);
1443 node->set_next(head_);
1444 head_ = node->address();
1445 available_ += Map::kSize;
1446}
1447
1448
1449Object* MapSpaceFreeList::Allocate() {
1450 if (head_ == NULL) {
1451 return Failure::RetryAfterGC(Map::kSize, MAP_SPACE);
1452 }
1453
1454 FreeListNode* node = FreeListNode::FromAddress(head_);
1455 head_ = node->next();
1456 available_ -= Map::kSize;
1457 return node;
1458}
1459
1460
1461// -----------------------------------------------------------------------------
1462// OldSpace implementation
1463
1464void OldSpace::PrepareForMarkCompact(bool will_compact) {
1465 if (will_compact) {
1466 // Reset relocation info. During a compacting collection, everything in
1467 // the space is considered 'available' and we will rediscover live data
1468 // and waste during the collection.
1469 MCResetRelocationInfo();
1470 mc_end_of_relocation_ = bottom();
1471 ASSERT(Available() == Capacity());
1472 } else {
1473 // During a non-compacting collection, everything below the linear
1474 // allocation pointer is considered allocated (everything above is
1475 // available) and we will rediscover available and wasted bytes during
1476 // the collection.
1477 accounting_stats_.AllocateBytes(free_list_.available());
1478 accounting_stats_.FillWastedBytes(Waste());
1479 }
1480
1481 // Clear the free list and switch to linear allocation if we are in FREE_LIST
1482 free_list_.Reset();
1483 if (allocation_mode_ == FREE_LIST) allocation_mode_ = LINEAR;
1484}
1485
1486
1487void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
1488 ASSERT(Contains(address));
1489 Address current_top = mc_end_of_relocation_;
1490 Page* current_page = Page::FromAllocationTop(current_top);
1491
1492 // No more objects relocated to this page? Move to the next.
1493 ASSERT(current_top <= current_page->mc_relocation_top);
1494 if (current_top == current_page->mc_relocation_top) {
1495 // The space should already be properly expanded.
1496 Page* next_page = current_page->next_page();
1497 CHECK(next_page->is_valid());
1498 mc_end_of_relocation_ = next_page->ObjectAreaStart();
1499 }
1500 ASSERT(mc_end_of_relocation_ == address);
1501 mc_end_of_relocation_ += size_in_bytes;
1502}
1503
1504
1505void OldSpace::MCCommitRelocationInfo() {
1506 // Update fast allocation info.
1507 allocation_info_.top = mc_forwarding_info_.top;
1508 allocation_info_.limit = mc_forwarding_info_.limit;
1509 ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
1510
1511 // The space is compacted and we haven't yet built free lists or
1512 // wasted any space.
1513 ASSERT(Waste() == 0);
1514 ASSERT(AvailableFree() == 0);
1515
1516 // Build the free list for the space.
1517 int computed_size = 0;
1518 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
1519 while (it.has_next()) {
1520 Page* p = it.next();
1521 // Space below the relocation pointer is allocated.
1522 computed_size += p->mc_relocation_top - p->ObjectAreaStart();
1523 if (it.has_next()) {
1524 // Free the space at the top of the page. We cannot use
1525 // p->mc_relocation_top after the call to Free (because Free will clear
1526 // remembered set bits).
1527 int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
1528 if (extra_size > 0) {
1529 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
1530 // The bytes we have just "freed" to add to the free list were
1531 // already accounted as available.
1532 accounting_stats_.WasteBytes(wasted_bytes);
1533 }
1534 }
1535 }
1536
1537 // Make sure the computed size - based on the used portion of the pages in
1538 // use - matches the size obtained while computing forwarding addresses.
1539 ASSERT(computed_size == Size());
1540}
1541
1542
1543Object* OldSpace::AllocateRawInternal(int size_in_bytes,
1544 AllocationInfo* alloc_info) {
1545 ASSERT(HasBeenSetup());
1546
1547 if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
1548 // Try linear allocation in the current page.
1549 Address cur_top = alloc_info->top;
1550 Address new_top = cur_top + size_in_bytes;
1551 if (new_top <= alloc_info->limit) {
1552 Object* obj = HeapObject::FromAddress(cur_top);
1553 alloc_info->top = new_top;
1554 ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
1555
1556 accounting_stats_.AllocateBytes(size_in_bytes);
1557 ASSERT(Size() <= Capacity());
1558 return obj;
1559 }
1560 } else {
1561 // For now we should not try free list allocation during m-c relocation.
1562 ASSERT(alloc_info == &allocation_info_);
1563 int wasted_bytes;
1564 Object* object = free_list_.Allocate(size_in_bytes, &wasted_bytes);
1565 accounting_stats_.WasteBytes(wasted_bytes);
1566 if (!object->IsFailure()) {
1567 accounting_stats_.AllocateBytes(size_in_bytes);
1568 return object;
1569 }
1570 }
1571 // Fast allocation failed.
1572 return SlowAllocateRaw(size_in_bytes, alloc_info);
1573}
1574
1575
1576// Slow cases for AllocateRawInternal. In linear allocation mode, try
1577// to allocate in the next page in the space. If there are no more
1578// pages, switch to free-list allocation if permitted, otherwise try
1579// to grow the space. In free-list allocation mode, try to grow the
1580// space and switch to linear allocation.
1581Object* OldSpace::SlowAllocateRaw(int size_in_bytes,
1582 AllocationInfo* alloc_info) {
1583 if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
1584 Page* top_page = TopPageOf(*alloc_info);
1585 // Until we implement free-list allocation during global gc, we have two
1586 // cases: one for normal allocation and one for m-c relocation allocation.
1587 if (alloc_info == &allocation_info_) { // Normal allocation.
1588 int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
1589 // Add the extra space at the top of this page to the free list.
1590 if (free_size > 0) {
1591 int wasted_bytes = free_list_.Free(alloc_info->top, free_size);
1592 accounting_stats_.WasteBytes(wasted_bytes);
1593 alloc_info->top += free_size;
1594 ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
1595 }
1596
1597 // Move to the next page in this space if there is one; switch
1598 // to free-list allocation, if we can; try to expand the space otherwise
1599 if (top_page->next_page()->is_valid()) {
1600 SetAllocationInfo(alloc_info, top_page->next_page());
1601 } else if (allocation_mode_ == LINEAR) {
1602 allocation_mode_ = FREE_LIST;
1603 } else if (Expand(top_page)) {
1604 ASSERT(top_page->next_page()->is_valid());
1605 SetAllocationInfo(alloc_info, top_page->next_page());
1606 } else {
1607 return Failure::RetryAfterGC(size_in_bytes, identity());
1608 }
1609 } else { // Allocation during m-c relocation.
1610 // During m-c 'allocation' while computing forwarding addresses, we do
1611 // not yet add blocks to the free list because they still contain live
1612 // objects. We also cache the m-c forwarding allocation pointer in the
1613 // current page.
1614
1615 // If there are no more pages try to expand the space. This can only
1616 // happen when promoting objects from the new space.
1617 if (!top_page->next_page()->is_valid()) {
1618 if (!Expand(top_page)) {
1619 return Failure::RetryAfterGC(size_in_bytes, identity());
1620 }
1621 }
1622
1623 // Move to the next page.
1624 ASSERT(top_page->next_page()->is_valid());
1625 top_page->mc_relocation_top = alloc_info->top;
1626 SetAllocationInfo(alloc_info, top_page->next_page());
1627 }
1628 } else { // Free-list allocation.
1629 // We failed to allocate from the free list; try to expand the space and
1630 // switch back to linear allocation.
1631 ASSERT(alloc_info == &allocation_info_);
1632 Page* top_page = TopPageOf(*alloc_info);
1633 if (!top_page->next_page()->is_valid()) {
1634 if (!Expand(top_page)) {
1635 return Failure::RetryAfterGC(size_in_bytes, identity());
1636 }
1637 }
1638
1639 // We surely have more pages, move to the next page and switch to linear
1640 // allocation.
1641 ASSERT(top_page->next_page()->is_valid());
1642 SetAllocationInfo(alloc_info, top_page->next_page());
1643 ASSERT(allocation_mode_ == FREE_LIST);
1644 allocation_mode_ = LINEAR;
1645 }
1646
1647 // Perform the allocation.
1648 return AllocateRawInternal(size_in_bytes, alloc_info);
1649}
1650
1651
1652#ifdef DEBUG
1653// We do not assume that the PageIterator works, because it depends on the
1654// invariants we are checking during verification.
1655void OldSpace::Verify() {
1656 // The allocation pointer should be valid, and it should be in a page in the
1657 // space.
1658 ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
1659 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
1660 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
1661
1662 // Loop over all the pages.
1663 bool above_allocation_top = false;
1664 Page* current_page = first_page_;
1665 while (current_page->is_valid()) {
1666 if (above_allocation_top) {
1667 // We don't care what's above the allocation top.
1668 } else {
1669 // Unless this is the last page in the space containing allocated
1670 // objects, the allocation top should be at the object area end.
1671 Address top = current_page->AllocationTop();
1672 if (current_page == top_page) {
1673 ASSERT(top == allocation_info_.top);
1674 // The next page will be above the allocation top.
1675 above_allocation_top = true;
1676 } else {
1677 ASSERT(top == current_page->ObjectAreaEnd());
1678 }
1679
1680 // It should be packed with objects from the bottom to the top.
1681 Address current = current_page->ObjectAreaStart();
1682 while (current < top) {
1683 HeapObject* object = HeapObject::FromAddress(current);
1684
1685 // The first word should be a map, and we expect all map pointers to
1686 // be in map space.
1687 Map* map = object->map();
1688 ASSERT(map->IsMap());
1689 ASSERT(Heap::map_space()->Contains(map));
1690
1691 // The object should not be a map.
1692 ASSERT(!object->IsMap());
1693
1694 // The object itself should look OK.
1695 // This is blocked by bug #1006953.
1696 // object->Verify();
1697
1698 // All the interior pointers should be contained in the heap and have
1699 // their remembered set bits set if they point to new space. Code
1700 // objects do not have remembered set bits that we care about.
1701 VerifyPointersAndRSetVisitor rset_visitor;
1702 VerifyPointersVisitor no_rset_visitor;
1703 int size = object->Size();
1704 if (object->IsCode()) {
1705 Code::cast(object)->ConvertICTargetsFromAddressToObject();
1706 object->IterateBody(map->instance_type(), size, &no_rset_visitor);
1707 Code::cast(object)->ConvertICTargetsFromObjectToAddress();
1708 } else {
1709 object->IterateBody(map->instance_type(), size, &rset_visitor);
1710 }
1711
1712 current += size;
1713 }
1714
1715 // The allocation pointer should not be in the middle of an object.
1716 ASSERT(current == top);
1717 }
1718
1719 current_page = current_page->next_page();
1720 }
1721}
1722
1723
1724struct CommentStatistic {
1725 const char* comment;
1726 int size;
1727 int count;
1728 void Clear() {
1729 comment = NULL;
1730 size = 0;
1731 count = 0;
1732 }
1733};
1734
1735
1736// must be small, since an iteration is used for lookup
1737const int kMaxComments = 64;
1738static CommentStatistic comments_statistics[kMaxComments+1];
1739
1740
1741void PagedSpace::ReportCodeStatistics() {
1742 ReportCodeKindStatistics();
1743 PrintF("Code comment statistics (\" [ comment-txt : size/ "
1744 "count (average)\"):\n");
1745 for (int i = 0; i <= kMaxComments; i++) {
1746 const CommentStatistic& cs = comments_statistics[i];
1747 if (cs.size > 0) {
1748 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
1749 cs.size/cs.count);
1750 }
1751 }
1752 PrintF("\n");
1753}
1754
1755
1756void PagedSpace::ResetCodeStatistics() {
1757 ClearCodeKindStatistics();
1758 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
1759 comments_statistics[kMaxComments].comment = "Unknown";
1760 comments_statistics[kMaxComments].size = 0;
1761 comments_statistics[kMaxComments].count = 0;
1762}
1763
1764
1765// Adds a comment to the 'comments_statistics' table. Performance is OK as
1766// long as 'kMaxComments' is small.
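// Illustrative example (hypothetical comment text and sizes): three "[ LoadIC"
// regions covering 10, 14 and 6 bytes of code leave that entry with
// size == 30 and count == 3, reported as an average of 10 bytes.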
1767static void EnterComment(const char* comment, int delta) {
1768 // Do not count empty comments
1769 if (delta <= 0) return;
1770 CommentStatistic* cs = &comments_statistics[kMaxComments];
1771 // Search for a free or matching entry in 'comments_statistics': 'cs'
1772 // points to result.
1773 for (int i = 0; i < kMaxComments; i++) {
1774 if (comments_statistics[i].comment == NULL) {
1775 cs = &comments_statistics[i];
1776 cs->comment = comment;
1777 break;
1778 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
1779 cs = &comments_statistics[i];
1780 break;
1781 }
1782 }
1783 // Update entry for 'comment'
1784 cs->size += delta;
1785 cs->count += 1;
1786}
1787
1788
1789// Called for each nested comment start (start marked with '[ xxx', end marked
1790// with ']'). RelocIterator 'it' must point to a comment reloc info.
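// Illustrative reloc stream (sizes hypothetical):
//   "[ outer" <8 bytes> "[ inner" <4 bytes> "]" <6 bytes> "]"
// The recursive call attributes the 4 inner bytes to "[ inner", while
// "[ outer" only receives the 8 + 6 bytes not covered by nested comments.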
1791static void CollectCommentStatistics(RelocIterator* it) {
1792 ASSERT(!it->done());
1793 ASSERT(it->rinfo()->rmode() == comment);
1794 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
1795 if (tmp[0] != '[') {
1796 // Not a nested comment; skip
1797 return;
1798 }
1799
1800 // Search for end of nested comment or a new nested comment
1801 const char* const comment_txt =
1802 reinterpret_cast<const char*>(it->rinfo()->data());
1803 const byte* prev_pc = it->rinfo()->pc();
1804 int flat_delta = 0;
1805 it->next();
1806 while (true) {
1807    // All nested comments must be terminated properly, so this loop is
1808    // guaranteed to exit.
1809 ASSERT(!it->done());
1810 if (it->rinfo()->rmode() == comment) {
1811 const char* const txt =
1812 reinterpret_cast<const char*>(it->rinfo()->data());
1813 flat_delta += it->rinfo()->pc() - prev_pc;
1814 if (txt[0] == ']') break; // End of nested comment
1815 // A new comment
1816 CollectCommentStatistics(it);
1817      // Skip code that was covered by the previous comment.
1818 prev_pc = it->rinfo()->pc();
1819 }
1820 it->next();
1821 }
1822 EnterComment(comment_txt, flat_delta);
1823}
1824
1825
1826// Collects code size statistics:
1827// - by code kind
1828// - by code comment
1829void PagedSpace::CollectCodeStatistics() {
1830 HeapObjectIterator obj_it(this);
1831 while (obj_it.has_next()) {
1832 HeapObject* obj = obj_it.next();
1833 if (obj->IsCode()) {
1834 Code* code = Code::cast(obj);
1835 code_kind_statistics[code->kind()] += code->Size();
1836 RelocIterator it(code);
1837 int delta = 0;
1838 const byte* prev_pc = code->instruction_start();
1839 while (!it.done()) {
1840 if (it.rinfo()->rmode() == comment) {
1841 delta += it.rinfo()->pc() - prev_pc;
1842 CollectCommentStatistics(&it);
1843 prev_pc = it.rinfo()->pc();
1844 }
1845 it.next();
1846 }
1847
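      // Any instruction bytes not covered by a comment region (including the
      // tail between the last comment and the relocation info) are counted
      // under "NoComment".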
1848 ASSERT(code->instruction_start() <= prev_pc &&
1849 prev_pc <= code->relocation_start());
1850 delta += code->relocation_start() - prev_pc;
1851 EnterComment("NoComment", delta);
1852 }
1853 }
1854}
1855
1856
1857void OldSpace::ReportStatistics() {
1858 int pct = Available() * 100 / Capacity();
1859 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
1860 Capacity(), Waste(), Available(), pct);
1861
1862 // Report remembered set statistics.
1863 int rset_marked_pointers = 0;
1864 int rset_marked_arrays = 0;
1865 int rset_marked_array_elements = 0;
1866 int cross_gen_pointers = 0;
1867 int cross_gen_array_elements = 0;
1868
1869 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
1870 while (page_it.has_next()) {
1871 Page* p = page_it.next();
1872
1873 for (Address rset_addr = p->RSetStart();
1874 rset_addr < p->RSetEnd();
1875 rset_addr += kIntSize) {
1876 int rset = Memory::int_at(rset_addr);
1877 if (rset != 0) {
1878 // Bits were set
1879 int intoff = rset_addr - p->address();
1880 int bitoff = 0;
1881 for (; bitoff < kBitsPerInt; ++bitoff) {
1882 if ((rset & (1 << bitoff)) != 0) {
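            // Each remembered set bit covers one object-aligned word of the
            // page: bitpos is the bit's index from the page start, and
            // shifting by kObjectAlignmentBits turns it into a byte offset.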
1883 int bitpos = intoff*kBitsPerByte + bitoff;
1884 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
1885 Object** obj = reinterpret_cast<Object**>(slot);
1886 if (*obj == Heap::fixed_array_map()) {
1887 rset_marked_arrays++;
1888 FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
1889
1890 rset_marked_array_elements += fa->length();
1891 // Manually inline FixedArray::IterateBody
1892 Address elm_start = slot + FixedArray::kHeaderSize;
1893 Address elm_stop = elm_start + fa->length() * kPointerSize;
1894 for (Address elm_addr = elm_start;
1895 elm_addr < elm_stop; elm_addr += kPointerSize) {
1896 // Filter non-heap-object pointers
1897 Object** elm_p = reinterpret_cast<Object**>(elm_addr);
1898 if (Heap::InNewSpace(*elm_p))
1899 cross_gen_array_elements++;
1900 }
1901 } else {
1902 rset_marked_pointers++;
1903 if (Heap::InNewSpace(*obj))
1904 cross_gen_pointers++;
1905 }
1906 }
1907 }
1908 }
1909 }
1910 }
1911
1912 pct = rset_marked_pointers == 0 ?
1913 0 : cross_gen_pointers * 100 / rset_marked_pointers;
1914 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
1915 rset_marked_pointers, cross_gen_pointers, pct);
1916 PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
1917 PrintF(" elements %d, ", rset_marked_array_elements);
1918 pct = rset_marked_array_elements == 0 ? 0
1919 : cross_gen_array_elements * 100 / rset_marked_array_elements;
1920 PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
1921 PrintF(" total rset-marked bits %d\n",
1922 (rset_marked_pointers + rset_marked_arrays));
1923 pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
1924 : (cross_gen_pointers + cross_gen_array_elements) * 100 /
1925 (rset_marked_pointers + rset_marked_array_elements);
1926 PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
1927 (rset_marked_pointers + rset_marked_array_elements),
1928 (cross_gen_pointers + cross_gen_array_elements),
1929 pct);
1930
1931 ClearHistograms();
1932 HeapObjectIterator obj_it(this);
1933 while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
1934 ReportHistogram(true);
1935}
1936
1937
1938// Dump the range of remembered set words between [start, end) corresponding
1939// to the pointers starting at object_p. The allocation_top is an object
1940// pointer which should not be read past. This is important for large object
1941// pages, where some bits in the remembered set range do not correspond to
1942// allocated addresses.
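// Output legend: '.' = clear bit, 'X' = set bit whose slot points to new
// space, 'o' = set bit whose slot points to old space, '|' = the allocation
// top, '#' = slots past the allocation top.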
1943static void PrintRSetRange(Address start, Address end, Object** object_p,
1944 Address allocation_top) {
1945 Address rset_address = start;
1946
1947  // If the range starts on an odd-numbered word (e.g., for large object extra
1948  // remembered set ranges), print some spaces.
1949 if ((reinterpret_cast<uint32_t>(start) / kIntSize) % 2 == 1) {
1950 PrintF(" ");
1951 }
1952
1953 // Loop over all the words in the range.
1954 while (rset_address < end) {
1955 uint32_t rset_word = Memory::uint32_at(rset_address);
1956 int bit_position = 0;
1957
1958 // Loop over all the bits in the word.
1959 while (bit_position < kBitsPerInt) {
1960 if (object_p == reinterpret_cast<Object**>(allocation_top)) {
1961 // Print a bar at the allocation pointer.
1962 PrintF("|");
1963 } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
1964 // Do not dereference object_p past the allocation pointer.
1965 PrintF("#");
1966 } else if ((rset_word & (1 << bit_position)) == 0) {
1967 // Print a dot for zero bits.
1968 PrintF(".");
1969 } else if (Heap::InNewSpace(*object_p)) {
1970 // Print an X for one bits for pointers to new space.
1971 PrintF("X");
1972 } else {
1973 // Print a circle for one bits for pointers to old space.
1974 PrintF("o");
1975 }
1976
1977 // Print a space after every 8th bit except the last.
1978 if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
1979 PrintF(" ");
1980 }
1981
1982 // Advance to next bit.
1983 bit_position++;
1984 object_p++;
1985 }
1986
1987    // Print a newline after every odd-numbered word, otherwise a space.
1988 if ((reinterpret_cast<uint32_t>(rset_address) / kIntSize) % 2 == 1) {
1989 PrintF("\n");
1990 } else {
1991 PrintF(" ");
1992 }
1993
1994 // Advance to next remembered set word.
1995 rset_address += kIntSize;
1996 }
1997}
1998
1999
2000void PagedSpace::DoPrintRSet(const char* space_name) {
2001 PageIterator it(this, PageIterator::PAGES_IN_USE);
2002 while (it.has_next()) {
2003 Page* p = it.next();
2004 PrintF("%s page 0x%x:\n", space_name, p);
2005 PrintRSetRange(p->RSetStart(), p->RSetEnd(),
2006 reinterpret_cast<Object**>(p->ObjectAreaStart()),
2007 p->AllocationTop());
2008 PrintF("\n");
2009 }
2010}
2011
2012
2013void OldSpace::PrintRSet() { DoPrintRSet("old"); }
2014#endif
2015
2016// -----------------------------------------------------------------------------
2017// MapSpace implementation
2018
2019void MapSpace::PrepareForMarkCompact(bool will_compact) {
2020 if (will_compact) {
2021 // Reset relocation info.
2022 MCResetRelocationInfo();
2023
2024 // Initialize map index entry.
2025 int page_count = 0;
2026 PageIterator it(this, PageIterator::ALL_PAGES);
2027 while (it.has_next()) {
2028 ASSERT_MAP_PAGE_INDEX(page_count);
2029
2030 Page* p = it.next();
2031 ASSERT(p->mc_page_index == page_count);
2032
2033 page_addresses_[page_count++] = p->address();
2034 }
2035
2036 // During a compacting collection, everything in the space is considered
2037 // 'available' (set by the call to MCResetRelocationInfo) and we will
2038 // rediscover live and wasted bytes during the collection.
2039 ASSERT(Available() == Capacity());
2040 } else {
2041 // During a non-compacting collection, everything below the linear
2042 // allocation pointer except wasted top-of-page blocks is considered
2043 // allocated and we will rediscover available bytes during the
2044 // collection.
2045 accounting_stats_.AllocateBytes(free_list_.available());
2046 }
2047
2048 // Clear the free list and switch to linear allocation if not already
2049 // required.
2050 free_list_.Reset();
2051 if (allocation_mode_ != LINEAR_ONLY) allocation_mode_ = LINEAR;
2052}
2053
2054
2055void MapSpace::MCCommitRelocationInfo() {
2056 // Update fast allocation info.
2057 allocation_info_.top = mc_forwarding_info_.top;
2058 allocation_info_.limit = mc_forwarding_info_.limit;
2059 ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
2060
2061 // The space is compacted and we haven't yet wasted any space.
2062 ASSERT(Waste() == 0);
2063
2064 // Update allocation_top of each page in use and compute waste.
2065 int computed_size = 0;
2066 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2067 while (it.has_next()) {
2068 Page* page = it.next();
2069 Address page_top = page->AllocationTop();
2070 computed_size += page_top - page->ObjectAreaStart();
2071 if (it.has_next()) {
2072 accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
2073 }
2074 }
2075
2076 // Make sure the computed size - based on the used portion of the
2077 // pages in use - matches the size we adjust during allocation.
2078 ASSERT(computed_size == Size());
2079}
2080
2081
2082Object* MapSpace::AllocateRawInternal(int size_in_bytes,
2083 AllocationInfo* alloc_info) {
2084 ASSERT(HasBeenSetup());
2085 // When doing free-list allocation, we implicitly assume that we always
2086 // allocate a map-sized block.
2087 ASSERT(size_in_bytes == Map::kSize);
2088
2089 if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
2090 // Try linear allocation in the current page.
2091 Address cur_top = alloc_info->top;
2092 Address new_top = cur_top + size_in_bytes;
2093 if (new_top <= alloc_info->limit) {
2094 Object* obj = HeapObject::FromAddress(cur_top);
2095 alloc_info->top = new_top;
2096 ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
2097
2098 accounting_stats_.AllocateBytes(size_in_bytes);
2099 return obj;
2100 }
2101 } else {
2102    // We should not do free-list allocation during a mark-compact collection.
2103 ASSERT(alloc_info == &allocation_info_);
2104 Object* object = free_list_.Allocate();
2105 if (!object->IsFailure()) {
2106 accounting_stats_.AllocateBytes(size_in_bytes);
2107 return object;
2108 }
2109 }
2110 // Fast allocation failed.
2111 return SlowAllocateRaw(size_in_bytes, alloc_info);
2112}
2113
2114
2115// Slow case for AllocateRawInternal. In linear allocation mode, try to
2116// allocate in the next page in the space. If there are no more pages, switch
2117// to free-list allocation. In free-list allocation mode, try to grow the
2118// space and switch to linear allocation.
2119Object* MapSpace::SlowAllocateRaw(int size_in_bytes,
2120 AllocationInfo* alloc_info) {
2121 if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
2122 Page* top_page = TopPageOf(*alloc_info);
2123
2124 // We do not do free-list allocation during compacting GCs.
2125 if (alloc_info == &mc_forwarding_info_) {
2126 // We expect to always have more pages, because the map space cannot
2127 // grow during GC. Move to the next page.
2128 CHECK(top_page->next_page()->is_valid());
2129 top_page->mc_relocation_top = alloc_info->top;
2130 SetAllocationInfo(alloc_info, top_page->next_page());
2131 } else { // Normal allocation.
2132      // Move to the next page (counting the top-of-page block as waste) if
2133      // there is one, expanding the space if only linear allocation is allowed;
2134      // otherwise switch to free-list allocation if permitted, or fail.
2135 if (top_page->next_page()->is_valid() ||
2136 (allocation_mode_ == LINEAR_ONLY && Expand(top_page))) {
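        // Only map-sized blocks are allocated here, so the wasted block at
        // the top of every full page has the same size, kPageExtra bytes
        // (also asserted in MapSpace::Verify).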
2137 int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
2138 ASSERT(free_size == kPageExtra);
2139 accounting_stats_.WasteBytes(free_size);
2140 SetAllocationInfo(alloc_info, top_page->next_page());
2141 } else if (allocation_mode_ == LINEAR) {
2142 allocation_mode_ = FREE_LIST;
2143 } else {
2144 return Failure::RetryAfterGC(size_in_bytes, MAP_SPACE);
2145 }
2146 }
2147 } else { // Free-list allocation.
2148 ASSERT(alloc_info == &allocation_info_);
2149    // We failed to allocate from the free list (i.e., it must be empty), so try
2150 // to expand the space and switch back to linear allocation.
2151 Page* top_page = TopPageOf(*alloc_info);
2152 if (!top_page->next_page()->is_valid()) {
2153 if (!Expand(top_page)) {
2154 return Failure::RetryAfterGC(size_in_bytes, MAP_SPACE);
2155 }
2156 }
2157
2158 // We have more pages now so we can move to the next and switch to linear
2159 // allocation.
2160 ASSERT(top_page->next_page()->is_valid());
2161 int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
2162 ASSERT(free_size == kPageExtra);
2163 accounting_stats_.WasteBytes(free_size);
2164 SetAllocationInfo(alloc_info, top_page->next_page());
2165 ASSERT(allocation_mode_ == FREE_LIST);
2166 allocation_mode_ = LINEAR;
2167 }
2168
2169 // Perform the allocation.
2170 return AllocateRawInternal(size_in_bytes, alloc_info);
2171}
2172
2173
2174#ifdef DEBUG
2175// We do not assume that the PageIterator works, because it depends on the
2176// invariants we are checking during verification.
2177void MapSpace::Verify() {
2178 // The allocation pointer should be valid, and it should be in a page in the
2179 // space.
2180 ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
2181 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
2182 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
2183
2184 // Loop over all the pages.
2185 bool above_allocation_top = false;
2186 Page* current_page = first_page_;
2187 while (current_page->is_valid()) {
2188 if (above_allocation_top) {
2189 // We don't care what's above the allocation top.
2190 } else {
2191 // Unless this is the last page in the space containing allocated
2192 // objects, the allocation top should be at a constant offset from the
2193 // object area end.
2194 Address top = current_page->AllocationTop();
2195 if (current_page == top_page) {
2196 ASSERT(top == allocation_info_.top);
2197 // The next page will be above the allocation top.
2198 above_allocation_top = true;
2199 } else {
2200 ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
2201 }
2202
2203 // It should be packed with objects from the bottom to the top.
2204 Address current = current_page->ObjectAreaStart();
2205 while (current < top) {
2206 HeapObject* object = HeapObject::FromAddress(current);
2207
2208 // The first word should be a map, and we expect all map pointers to
2209 // be in map space.
2210 Map* map = object->map();
2211 ASSERT(map->IsMap());
2212 ASSERT(Heap::map_space()->Contains(map));
2213
2214 // The object should be a map or a byte array.
2215 ASSERT(object->IsMap() || object->IsByteArray());
2216
2217 // The object itself should look OK.
2218 // This is blocked by bug #1006953.
2219 // object->Verify();
2220
2221 // All the interior pointers should be contained in the heap and
2222 // have their remembered set bits set if they point to new space.
2223 VerifyPointersAndRSetVisitor visitor;
2224 int size = object->Size();
2225 object->IterateBody(map->instance_type(), size, &visitor);
2226
2227 current += size;
2228 }
2229
2230 // The allocation pointer should not be in the middle of an object.
2231 ASSERT(current == top);
2232 }
2233
2234 current_page = current_page->next_page();
2235 }
2236}
2237
2238
2239void MapSpace::ReportStatistics() {
2240 int pct = Available() * 100 / Capacity();
2241 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2242 Capacity(), Waste(), Available(), pct);
2243
2244 // Report remembered set statistics.
2245 int rset_marked_pointers = 0;
2246 int cross_gen_pointers = 0;
2247
2248 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2249 while (page_it.has_next()) {
2250 Page* p = page_it.next();
2251
2252 for (Address rset_addr = p->RSetStart();
2253 rset_addr < p->RSetEnd();
2254 rset_addr += kIntSize) {
2255 int rset = Memory::int_at(rset_addr);
2256 if (rset != 0) {
2257 // Bits were set
2258 int intoff = rset_addr - p->address();
2259 int bitoff = 0;
2260 for (; bitoff < kBitsPerInt; ++bitoff) {
2261 if ((rset & (1 << bitoff)) != 0) {
2262 int bitpos = intoff*kBitsPerByte + bitoff;
2263 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2264 Object** obj = reinterpret_cast<Object**>(slot);
2265 rset_marked_pointers++;
2266 if (Heap::InNewSpace(*obj))
2267 cross_gen_pointers++;
2268 }
2269 }
2270 }
2271 }
2272 }
2273
2274 pct = rset_marked_pointers == 0 ?
2275 0 : cross_gen_pointers * 100 / rset_marked_pointers;
2276 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
2277 rset_marked_pointers, cross_gen_pointers, pct);
2278
2279 ClearHistograms();
2280 HeapObjectIterator obj_it(this);
2281 while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
2282 ReportHistogram(false);
2283}
2284
2285
2286void MapSpace::PrintRSet() { DoPrintRSet("map"); }
2287#endif
2288
2289
2290// -----------------------------------------------------------------------------
2291// LargeObjectIterator
2292
2293LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2294 current_ = space->first_chunk_;
2295 size_func_ = NULL;
2296}
2297
2298
2299LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2300 HeapObjectCallback size_func) {
2301 current_ = space->first_chunk_;
2302 size_func_ = size_func;
2303}
2304
2305
2306HeapObject* LargeObjectIterator::next() {
2307 ASSERT(has_next());
2308 HeapObject* object = current_->GetObject();
2309 current_ = current_->next();
2310 return object;
2311}
2312
2313
2314// -----------------------------------------------------------------------------
2315// LargeObjectChunk
2316
2317LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2318 size_t* chunk_size) {
2319 size_t requested = ChunkSizeFor(size_in_bytes);
2320 void* mem = MemoryAllocator::AllocateRawMemory(requested, chunk_size);
2321 if (mem == NULL) return NULL;
2322 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
2323 if (*chunk_size < requested) {
2324 MemoryAllocator::FreeRawMemory(mem, *chunk_size);
2325 LOG(DeleteEvent("LargeObjectChunk", mem));
2326 return NULL;
2327 }
2328 return reinterpret_cast<LargeObjectChunk*>(mem);
2329}
2330
2331
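// Illustrative arithmetic (constants hypothetical): with a 4K OS allocation
// alignment and an 8K page size, a request for N bytes reserves
// N + (8K - 4K) + Page::kObjectStartOffset bytes, so a page-aligned page with
// its header always fits inside the raw chunk (see AllocateRawInternal).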
2332int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
2333 int os_alignment = OS::AllocateAlignment();
2334 if (os_alignment < Page::kPageSize)
2335 size_in_bytes += (Page::kPageSize - os_alignment);
2336 return size_in_bytes + Page::kObjectStartOffset;
2337}
2338
2339// -----------------------------------------------------------------------------
2340// LargeObjectSpace
2341
2342LargeObjectSpace::LargeObjectSpace()
2343 : first_chunk_(NULL),
2344 size_(0),
2345 page_count_(0) {}
2346
2347
2348bool LargeObjectSpace::Setup() {
2349 first_chunk_ = NULL;
2350 size_ = 0;
2351 page_count_ = 0;
2352 return true;
2353}
2354
2355
2356void LargeObjectSpace::TearDown() {
2357 while (first_chunk_ != NULL) {
2358 LargeObjectChunk* chunk = first_chunk_;
2359 first_chunk_ = first_chunk_->next();
2360 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
2361 MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
2362 }
2363
2364 size_ = 0;
2365 page_count_ = 0;
2366}
2367
2368
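// Allocates a chunk large enough for 'requested_size' bytes and returns an
// object of 'object_size' bytes placed at the page's object area start; the
// difference (room for an extra remembered set) is zeroed.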
2369Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
2370 int object_size) {
2371 ASSERT(0 < object_size && object_size <= requested_size);
2372 size_t chunk_size;
2373 LargeObjectChunk* chunk =
2374 LargeObjectChunk::New(requested_size, &chunk_size);
2375 if (chunk == NULL) {
2376 return Failure::RetryAfterGC(requested_size, LO_SPACE);
2377 }
2378
2379 size_ += chunk_size;
2380 page_count_++;
2381 chunk->set_next(first_chunk_);
2382 chunk->set_size(chunk_size);
2383 first_chunk_ = chunk;
2384
2385 // Set the object address and size in the page header and clear its
2386 // remembered set.
2387 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2388 Address object_address = page->ObjectAreaStart();
2389 // Clear the low order bit of the second word in the page to flag it as a
2390 // large object page. If the chunk_size happened to be written there, its
2391 // low order bit should already be clear.
2392 ASSERT((chunk_size & 0x1) == 0);
2393 page->is_normal_page &= ~0x1;
2394 page->ClearRSet();
2395 int extra_bytes = requested_size - object_size;
2396 if (extra_bytes > 0) {
2397      // The extra memory (the object's extra remembered set) should be cleared.
2398 memset(object_address + object_size, 0, extra_bytes);
2399 }
2400
2401 return HeapObject::FromAddress(object_address);
2402}
2403
2404
2405Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2406 ASSERT(0 < size_in_bytes);
2407 return AllocateRawInternal(size_in_bytes, size_in_bytes);
2408}
2409
2410
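// Large fixed arrays can extend past the page's normal remembered set
// coverage, so room for an extra remembered set is reserved directly after
// the object (see ClearRSet and IterateRSet below).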
2411Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
2412 int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
2413 return AllocateRawInternal(size_in_bytes + extra_rset_bytes, size_in_bytes);
2414}
2415
2416
2417// GC support
2418Object* LargeObjectSpace::FindObject(Address a) {
2419 for (LargeObjectChunk* chunk = first_chunk_;
2420 chunk != NULL;
2421 chunk = chunk->next()) {
2422 Address chunk_address = chunk->address();
2423 if (chunk_address <= a && a < chunk_address + chunk->size()) {
2424 return chunk->GetObject();
2425 }
2426 }
2427 return Failure::Exception();
2428}
2429
2430
2431void LargeObjectSpace::ClearRSet() {
2432 ASSERT(Page::is_rset_in_use());
2433
2434 LargeObjectIterator it(this);
2435 while (it.has_next()) {
2436 HeapObject* object = it.next();
2437 // We only have code, sequential strings, or fixed arrays in large
2438 // object space, and only fixed arrays need remembered set support.
2439 if (object->IsFixedArray()) {
2440        // Clear the normal remembered set region of the page.
2441 Page* page = Page::FromAddress(object->address());
2442 page->ClearRSet();
2443
2444 // Clear the extra remembered set.
2445 int size = object->Size();
2446 int extra_rset_bytes = ExtraRSetBytesFor(size);
2447 memset(object->address() + size, 0, extra_rset_bytes);
2448 }
2449 }
2450}
2451
2452
2453void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
2454 ASSERT(Page::is_rset_in_use());
2455
2456 LargeObjectIterator it(this);
2457 while (it.has_next()) {
2458 // We only have code, sequential strings, or fixed arrays in large
2459 // object space, and only fixed arrays can possibly contain pointers to
2460 // the young generation.
2461 HeapObject* object = it.next();
2462 if (object->IsFixedArray()) {
2463 // Iterate the normal page remembered set range.
2464 Page* page = Page::FromAddress(object->address());
2465 Address object_end = object->address() + object->Size();
2466 Heap::IterateRSetRange(page->ObjectAreaStart(),
2467 Min(page->ObjectAreaEnd(), object_end),
2468 page->RSetStart(),
2469 copy_object_func);
2470
2471 // Iterate the extra array elements.
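      // The extra remembered set lives directly after the object, so
      // 'object_end' is also the start of the remembered set for this range.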
2472 if (object_end > page->ObjectAreaEnd()) {
2473 Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
2474 object_end, copy_object_func);
2475 }
2476 }
2477 }
2478}
2479
2480
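// Sweep the space: chunks whose object is marked are kept (and unmarked);
// all other chunks are unlinked from the list and their memory is freed.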
2481void LargeObjectSpace::FreeUnmarkedObjects() {
2482 LargeObjectChunk* previous = NULL;
2483 LargeObjectChunk* current = first_chunk_;
2484 while (current != NULL) {
2485 HeapObject* object = current->GetObject();
2486 if (is_marked(object)) {
2487 clear_mark(object);
2488 previous = current;
2489 current = current->next();
2490 } else {
2491 Address chunk_address = current->address();
2492 size_t chunk_size = current->size();
2493
2494 // Cut the chunk out from the chunk list.
2495 current = current->next();
2496 if (previous == NULL) {
2497 first_chunk_ = current;
2498 } else {
2499 previous->set_next(current);
2500 }
2501
2502 // Free the chunk.
2503 if (object->IsCode()) {
2504 LOG(CodeDeleteEvent(object->address()));
2505 }
2506 size_ -= chunk_size;
2507 page_count_--;
2508 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
2509 LOG(DeleteEvent("LargeObjectChunk", chunk_address));
2510 }
2511 }
2512}
2513
2514
2515bool LargeObjectSpace::Contains(HeapObject* object) {
2516 Address address = object->address();
2517 Page* page = Page::FromAddress(address);
2518
2519 SLOW_ASSERT(!page->IsLargeObjectPage()
2520 || !FindObject(address)->IsFailure());
2521
2522 return page->IsLargeObjectPage();
2523}
2524
2525
2526#ifdef DEBUG
2527// We do not assume that the large object iterator works, because it depends
2528// on the invariants we are checking during verification.
2529void LargeObjectSpace::Verify() {
2530 for (LargeObjectChunk* chunk = first_chunk_;
2531 chunk != NULL;
2532 chunk = chunk->next()) {
2533 // Each chunk contains an object that starts at the large object page's
2534 // object area start.
2535 HeapObject* object = chunk->GetObject();
2536 Page* page = Page::FromAddress(object->address());
2537 ASSERT(object->address() == page->ObjectAreaStart());
2538
2539 // The first word should be a map, and we expect all map pointers to be
2540 // in map space.
2541 Map* map = object->map();
2542 ASSERT(map->IsMap());
2543 ASSERT(Heap::map_space()->Contains(map));
2544
2545 // We have only code, sequential strings, fixed arrays, and byte arrays
2546 // in large object space.
2547 ASSERT(object->IsCode() || object->IsSeqString()
2548 || object->IsFixedArray() || object->IsByteArray());
2549
2550 // The object itself should look OK.
2551 // This is blocked by bug #1006953.
2552 // object->Verify();
2553
2554 // Byte arrays and strings don't have interior pointers.
2555 if (object->IsCode()) {
2556 VerifyPointersVisitor code_visitor;
2557 Code::cast(object)->ConvertICTargetsFromAddressToObject();
2558 object->IterateBody(map->instance_type(),
2559 object->Size(),
2560 &code_visitor);
2561 Code::cast(object)->ConvertICTargetsFromObjectToAddress();
2562 } else if (object->IsFixedArray()) {
2563      // We loop over fixed arrays ourselves, rather than using the visitor,
2564 // because the visitor doesn't support the start/offset iteration
2565 // needed for IsRSetSet.
2566 FixedArray* array = FixedArray::cast(object);
2567 for (int j = 0; j < array->length(); j++) {
2568 Object* element = array->get(j);
2569 if (element->IsHeapObject()) {
2570 HeapObject* element_object = HeapObject::cast(element);
2571 ASSERT(Heap::Contains(element_object));
2572 ASSERT(element_object->map()->IsMap());
2573 if (Heap::InNewSpace(element_object)) {
2574 ASSERT(Page::IsRSetSet(object->address(),
2575 FixedArray::kHeaderSize + j * kPointerSize));
2576 }
2577 }
2578 }
2579 }
2580 }
2581}
2582
2583
2584void LargeObjectSpace::Print() {
2585 LargeObjectIterator it(this);
2586 while (it.has_next()) {
2587 it.next()->Print();
2588 }
2589}
2590
2591
2592void LargeObjectSpace::ReportStatistics() {
2593 PrintF(" size: %d\n", size_);
2594 int num_objects = 0;
2595 ClearHistograms();
2596 LargeObjectIterator it(this);
2597 while (it.has_next()) {
2598 num_objects++;
2599 CollectHistogramInfo(it.next());
2600 }
2601
2602 PrintF(" number of objects %d\n", num_objects);
2603 if (num_objects > 0) ReportHistogram(false);
2604}
2605
2606
2607void LargeObjectSpace::CollectCodeStatistics() {
2608 LargeObjectIterator obj_it(this);
2609 while (obj_it.has_next()) {
2610 HeapObject* obj = obj_it.next();
2611 if (obj->IsCode()) {
2612 Code* code = Code::cast(obj);
2613 code_kind_statistics[code->kind()] += code->Size();
2614 }
2615 }
2616}
2617
2618
2619void LargeObjectSpace::PrintRSet() {
2620 LargeObjectIterator it(this);
2621 while (it.has_next()) {
2622 HeapObject* object = it.next();
2623 if (object->IsFixedArray()) {
2624 Page* page = Page::FromAddress(object->address());
2625
2626 Address allocation_top = object->address() + object->Size();
2627 PrintF("large page 0x%x:\n", page);
2628 PrintRSetRange(page->RSetStart(), page->RSetEnd(),
2629 reinterpret_cast<Object**>(object->address()),
2630 allocation_top);
2631 int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
2632 int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
2633 kBitsPerInt);
2634 PrintF("------------------------------------------------------------"
2635 "-----------\n");
2636 PrintRSetRange(allocation_top,
2637 allocation_top + extra_rset_bits / kBitsPerByte,
2638 reinterpret_cast<Object**>(object->address()
2639 + Page::kObjectAreaSize),
2640 allocation_top);
2641 PrintF("\n");
2642 }
2643 }
2644}
2645#endif // DEBUG
2646
2647} } // namespace v8::internal