// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 { namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


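// Advance the iterator past the end of the current page: move to the next
// page in the space and reset the current limit to that page's allocation
// top (or to end_addr_ if it is the last page iterated over).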
bool HeapObjectIterator::HasNextInNextPage() {
  if (cur_addr_ == end_addr_) return false;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return true;
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// ----------------------------------------------------------------------------
// PageIterator

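// The iterator yields pages from the space's first page up to (but not
// including) stop_page_, which depends on the mode: the page after the
// allocation top for PAGES_IN_USE, the page after the mark-compact
// relocation top for PAGES_USED_BY_MC, or the invalid end-of-list page for
// ALL_PAGES.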
PageIterator::PageIterator(PagedSpace* space, Mode mode) {
  cur_page_ = space->first_page_;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage()->next_page();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage()->next_page();
      break;
    case ALL_PAGES:
      stop_page_ = Page::FromAddress(NULL);
      break;
    default:
      UNREACHABLE();
  }
}


// ----------------------------------------------------------------------------
// Page

#ifdef DEBUG
Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif

// ----------------------------------------------------------------------------
// MemoryAllocator
//
int MemoryAllocator::capacity_ = 0;
int MemoryAllocator::size_ = 0;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


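// Unused chunk ids are recycled through free_chunk_ids_, which is used as a
// simple stack with top_ pointing just past the last free id.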
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


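// Set up the allocator's capacity and pre-populate the chunk table and the
// free chunk id stack, so that ids can later be handed out with Pop().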
bool MemoryAllocator::Setup(int capacity) {
  capacity_ = RoundUp(capacity, Page::kPageSize);

  // Over-estimate the size of the chunks_ array.  It assumes that the
  // expansion of old space is always in units of a chunk (kChunkSize) except
  // for the last expansion.
  //
  // Due to alignment, allocated space might be one page less than the
  // required number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for the semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


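// Allocate raw memory from the OS, failing if it would push the allocator
// past its capacity.  On success, *allocated holds the actual number of
// bytes obtained, which may differ from the requested size.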
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<int>(requested) > capacity_) return NULL;

  void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
  int alloced = *allocated;
  size_ += alloced;
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
  OS::Free(mem, length);
  Counters::memory_allocated.Decrement(length);
  size_ -= length;
  ASSERT(size_ >= 0);
}


void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += requested;
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return (RoundDown(start + size, Page::kPageSize)
          - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
}


Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee that the desired number of pages
  // can be allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = chunk_size >> Page::kPageSizeBits;

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size);
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}


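// Commit a page-aligned range of the initial chunk (reserved earlier by
// ReserveInitialChunk) and initialize the pages in it for use by 'owner'.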
Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
  Counters::memory_allocated.Increment(size);

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
  Counters::memory_allocated.Increment(size);
  return true;
}


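// Thread the pages of a chunk into a linked list: each page's opaque_header
// encodes the address of the next page (zero for the last page) ORed with
// the chunk id in the low, page-alignment bits.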
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->is_normal_page = 1;
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


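// Free, chunk by chunk, the pages from 'p' to the end of the page list.
// Chunks can only be released whole, so if 'p' is not the first page of its
// chunk, its own chunk is kept (with 'p' returned as the new list end) and
// only the following chunks are freed; otherwise every chunk from 'p' on is
// freed and the invalid page is returned.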
Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'.
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'p'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // Set the next_page of last_page to NULL.
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(c.size());
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    FreeRawMemory(c.address(), c.size());
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %d, used: %d, available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct * 100));
}
#endif


// ----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(int max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate a new chunk instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
                              max_capacity_ / Page::kObjectAreaSize);
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->ClearRSet();
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  first_page_ = MemoryAllocator::FreePages(first_page_);
  ASSERT(!first_page_->is_valid());

  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::ClearRSet() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->ClearRSet();
  }
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers an address at the beginning of a page
  // to be in that page; MCSpaceOffsetForAddress considers it to be in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to the previous page.
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->mc_relocation_top = mc_forwarding_info_.top;
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Clear remembered set of new pages.
  while (p->is_valid()) {
    p->ClearRSet();
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Loop over the pages from the top page to the end of the space to count
  // the number of pages to keep and find the last page to keep.
  int free_pages = 0;
  int pages_to_keep = 0;  // Of the free pages.
  Page* last_page_to_keep = top_page;
  Page* current_page = top_page->next_page();
  // Loop over the pages to the end of the space.
  while (current_page->is_valid()) {
    // Advance last_page_to_keep every other step to end up at the midpoint.
    if ((free_pages & 0x1) == 1) {
      pages_to_keep++;
      last_page_to_keep = last_page_to_keep->next_page();
    }
    free_pages++;
    current_page = current_page->next_page();
  }

  // Free pages after last_page_to_keep, and adjust the next_page link.
  Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
  MemoryAllocator::SetNextPage(last_page_to_keep, p);

  // Since pages are only freed in whole chunks, we may have kept more than
  // pages_to_keep.
  while (p->is_valid()) {
    pages_to_keep++;
    p = p->next_page();
  }

  // The difference between free_pages and pages_to_keep is the number of
  // pages actually freed.
  ASSERT(pages_to_keep <= free_pages);
  int bytes_freed = (free_pages - pages_to_keep) * Page::kObjectAreaSize;
  accounting_stats_.ShrinkSpace(bytes_freed);

  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


// ----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size.  The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::SemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));
  maximum_capacity_ = maximum_semispace_capacity;
  capacity_ = initial_semispace_capacity;

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * maximum_capacity_);
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start, capacity_, maximum_capacity_)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_capacity_,
                         capacity_,
                         maximum_capacity_)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  capacity_ = 0;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


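// Swap the roles of the two semispaces: the old to space becomes the from
// space being evacuated, and vice versa.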
void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


bool NewSpace::Double() {
  ASSERT(capacity_ <= maximum_capacity_ / 2);
  // TODO(1240712): Failure to double the from space can result in
  // semispaces of different sizes.  In the event of that failure, the
  // to space doubling should be rolled back before returning false.
  if (!to_space_.Double() || !from_space_.Double()) return false;
  capacity_ *= 2;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


// ----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation.  The constructor does not
  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack.  It requires contiguous memory
  // addresses.
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;

  if (!MemoryAllocator::CommitBlock(start, capacity_, executable())) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;

  age_mark_ = start_;
  return true;
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Double() {
  if (!MemoryAllocator::CommitBlock(high(), capacity_, executable())) {
    return false;
  }
  capacity_ *= 2;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


// ----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// A static array of histogram info for each type.
static HistogramInfo heap_histograms[LAST_TYPE + 1];
static JSObject::SpillInformation js_spill_information;

// heap_histograms is shared, always clear it before using it.
static void ClearHistograms() {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  js_spill_information.Clear();
}


static int code_kind_statistics[Code::NUMBER_OF_KINDS];


static void ClearCodeKindStatistics() {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  const char* table[Code::NUMBER_OF_KINDS];

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
    }
  }

#undef CASE

  PrintF("\n   Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF("     %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(heap_histograms[type].name() != NULL);
  heap_histograms[type].increment_number(1);
  heap_histograms[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
  }

  return obj->Size();
}


static void ReportHistogram(bool print_spill) {
  PrintF("\n  Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (heap_histograms[i].number() > 0) {
      PrintF("    %-33s%10d (%10d bytes)\n",
             heap_histograms[i].name(),
             heap_histograms[i].number(),
             heap_histograms[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name)                  \
    string_number += heap_histograms[type].number(); \
    string_bytes += heap_histograms[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF("    %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    js_spill_information.Print();
  }
}
#endif  // DEBUG


// Support for statistics gathering for --heap-stats and --log-gc.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}

// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the
// --log-gc flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  while (it.has_next()) RecordAllocation(it.next());
}


#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(HistogramInfo* info, const char* description) {
  LOG(HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name)       \
    string_number += info[type].number(); \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(HeapSampleEndEvent("NewSpace", description));
}
#endif  // ENABLE_LOGGING_AND_PROFILING


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF("  capacity: %d, available: %d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct * 100));
    PrintF("\n  Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF("    %-33s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) {
    DoReportStatistics(allocated_histogram_, "allocated");
    DoReportStatistics(promoted_histogram_, "promoted");
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


// ----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block.  If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > Array::kHeaderSize) {
    set_map(Heap::byte_array_map());
    ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(Heap::one_word_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(Heap::two_word_filler_map());
  } else {
    UNREACHABLE();
  }
  ASSERT(Size() == size_in_bytes);
}


Address FreeListNode::next() {
  ASSERT(map() == Heap::byte_array_map());
  ASSERT(Size() >= kNextOffset + kPointerSize);
  return Memory::Address_at(address() + kNextOffset);
}


void FreeListNode::set_next(Address next) {
  ASSERT(map() == Heap::byte_array_map());
  ASSERT(Size() >= kNextOffset + kPointerSize);
  Memory::Address_at(address() + kNextOffset) = next;
}


OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
  Reset();
}


void OldSpaceFreeList::Reset() {
  available_ = 0;
  for (int i = 0; i < kFreeListsLength; i++) {
    free_[i].head_node_ = NULL;
  }
  needs_rebuild_ = false;
  finger_ = kHead;
  free_[kHead].next_size_ = kEnd;
}


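// Rebuild the linked list of non-empty size classes (free_[i].next_size_) by
// scanning all size classes in increasing order.  The rebuild is deferred
// until the next allocation after blocks have been freed.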
void OldSpaceFreeList::RebuildSizeList() {
  ASSERT(needs_rebuild_);
  int cur = kHead;
  for (int i = cur + 1; i < kFreeListsLength; i++) {
    if (free_[i].head_node_ != NULL) {
      free_[cur].next_size_ = i;
      cur = i;
    }
  }
  free_[cur].next_size_ = kEnd;
  needs_rebuild_ = false;
}


int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
  for (int i = 0; i < size_in_bytes; i += kPointerSize) {
    Memory::Address_at(start + i) = kZapValue;
  }
#endif
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(size_in_bytes);

  // Early return to drop too-small blocks on the floor (one or two word
  // blocks cannot hold a map pointer, a size field, and a pointer to the
  // next block in the free list).
  if (size_in_bytes < kMinBlockSize) {
    return size_in_bytes;
  }

  // Insert other blocks at the head of an exact free list.
  int index = size_in_bytes >> kPointerSizeLog2;
  node->set_next(free_[index].head_node_);
  free_[index].head_node_ = node->address();
  available_ += size_in_bytes;
  needs_rebuild_ = true;
  return 0;
}


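// Allocate a block of size_in_bytes from the free list.  First try an exact
// fit; failing that, search the size list (starting from the finger_ cache)
// for the smallest sufficient block, allocate its front, and return the
// unused tail to the free list.  A tail below kMinBlockSize cannot be linked
// into the free list and is reported via *wasted_bytes instead.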
Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  if (needs_rebuild_) RebuildSizeList();
  int index = size_in_bytes >> kPointerSizeLog2;
  // Check for a perfect fit.
  if (free_[index].head_node_ != NULL) {
    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
    // If this was the last block of its size, remove the size.
    if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
    available_ -= size_in_bytes;
    *wasted_bytes = 0;
    return node;
  }
  // Search the size list for the best fit.
  int prev = finger_ < index ? finger_ : kHead;
  int cur = FindSize(index, &prev);
  ASSERT(index < cur);
  if (cur == kEnd) {
    // No large enough size in list.
    *wasted_bytes = 0;
    return Failure::RetryAfterGC(size_in_bytes, owner_);
  }
  int rem = cur - index;
  int rem_bytes = rem << kPointerSizeLog2;
  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
                                                     size_in_bytes);
  // Distinguish the cases prev < rem < cur and rem <= prev < cur
  // to avoid many redundant tests and calls to Insert/RemoveSize.
  if (prev < rem) {
    // Simple case: insert rem between prev and cur.
    finger_ = prev;
    free_[prev].next_size_ = rem;
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
      free_[rem].next_size_ = free_[cur].next_size_;
    } else {
      free_[rem].next_size_ = cur;
    }
    // Add the remainder block.
    rem_node->set_size(rem_bytes);
    rem_node->set_next(free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
  } else {
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
      finger_ = prev;
      free_[prev].next_size_ = free_[cur].next_size_;
    }
    if (rem_bytes < kMinBlockSize) {
      // Too-small remainder is wasted.
      rem_node->set_size(rem_bytes);
      available_ -= size_in_bytes + rem_bytes;
      *wasted_bytes = rem_bytes;
      return cur_node;
    }
    // Add the remainder block and, if needed, insert its size.
    rem_node->set_size(rem_bytes);
    rem_node->set_next(free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
    if (rem_node->next() == NULL) InsertSize(rem);
  }
  available_ -= size_in_bytes;
  *wasted_bytes = 0;
  return cur_node;
}


#ifdef DEBUG
bool OldSpaceFreeList::Contains(FreeListNode* node) {
  for (int i = 0; i < kFreeListsLength; i++) {
    Address cur_addr = free_[i].head_node_;
    while (cur_addr != NULL) {
      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
      if (cur_node == node) return true;
      cur_addr = cur_node->next();
    }
  }
  return false;
}
#endif


MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
  owner_ = owner;
  Reset();
}


void MapSpaceFreeList::Reset() {
  available_ = 0;
  head_ = NULL;
}


void MapSpaceFreeList::Free(Address start) {
#ifdef DEBUG
  for (int i = 0; i < Map::kSize; i += kPointerSize) {
    Memory::Address_at(start + i) = kZapValue;
  }
#endif
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(Map::kSize);
  node->set_next(head_);
  head_ = node->address();
  available_ += Map::kSize;
}


Object* MapSpaceFreeList::Allocate() {
  if (head_ == NULL) {
    return Failure::RetryAfterGC(Map::kSize, owner_);
  }

  FreeListNode* node = FreeListNode::FromAddress(head_);
  head_ = node->next();
  available_ -= Map::kSize;
  return node;
}


// ----------------------------------------------------------------------------
// OldSpace implementation

void OldSpace::PrepareForMarkCompact(bool will_compact) {
  if (will_compact) {
    // Reset relocation info.  During a compacting collection, everything in
    // the space is considered 'available' and we will rediscover live data
    // and waste during the collection.
    MCResetRelocationInfo();
    mc_end_of_relocation_ = bottom();
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer is considered allocated (everything above is
    // available) and we will rediscover available and wasted bytes during
    // the collection.
    accounting_stats_.AllocateBytes(free_list_.available());
    accounting_stats_.FillWastedBytes(Waste());
  }

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


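// Advance mc_end_of_relocation_ past an object of size_in_bytes relocated to
// 'address', moving on to the next page when the current page's relocation
// top has been reached.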
1484void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
1485 ASSERT(Contains(address));
1486 Address current_top = mc_end_of_relocation_;
1487 Page* current_page = Page::FromAllocationTop(current_top);
1488
1489 // No more objects relocated to this page? Move to the next.
1490 ASSERT(current_top <= current_page->mc_relocation_top);
1491 if (current_top == current_page->mc_relocation_top) {
1492 // The space should already be properly expanded.
1493 Page* next_page = current_page->next_page();
1494 CHECK(next_page->is_valid());
1495 mc_end_of_relocation_ = next_page->ObjectAreaStart();
1496 }
1497 ASSERT(mc_end_of_relocation_ == address);
1498 mc_end_of_relocation_ += size_in_bytes;
1499}
1500
1501
1502void OldSpace::MCCommitRelocationInfo() {
1503 // Update fast allocation info.
1504 allocation_info_.top = mc_forwarding_info_.top;
1505 allocation_info_.limit = mc_forwarding_info_.limit;
kasper.lund7276f142008-07-30 08:49:36 +00001506 ASSERT(allocation_info_.VerifyPagedAllocation());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001507
1508 // The space is compacted and we haven't yet built free lists or
1509 // wasted any space.
1510 ASSERT(Waste() == 0);
1511 ASSERT(AvailableFree() == 0);
1512
1513 // Build the free list for the space.
1514 int computed_size = 0;
1515 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
1516 while (it.has_next()) {
1517 Page* p = it.next();
1518 // Space below the relocation pointer is allocated.
1519 computed_size += p->mc_relocation_top - p->ObjectAreaStart();
1520 if (it.has_next()) {
1521 // Free the space at the top of the page. We cannot use
1522 // p->mc_relocation_top after the call to Free (because Free will clear
1523 // remembered set bits).
1524 int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
1525 if (extra_size > 0) {
1526 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
1527 // The bytes we have just "freed" to add to the free list were
1528 // already accounted as available.
1529 accounting_stats_.WasteBytes(wasted_bytes);
1530 }
1531 }
1532 }
1533
1534 // Make sure the computed size - based on the used portion of the pages in
1535 // use - matches the size obtained while computing forwarding addresses.
1536 ASSERT(computed_size == Size());
1537}


// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed (size_in_bytes should not be greater than a page's
  // object area size).
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation.
  int wasted_bytes;
  Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
  accounting_stats_.WasteBytes(wasted_bytes);
  if (!result->IsFailure()) {
    accounting_stats_.AllocateBytes(size_in_bytes);
    return HeapObject::cast(result);
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}
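
// In outline, the slow path above tries, in order (a summary of the code,
// not literal V8 source):
//
//   next page exists            -> AllocateInNextPage()
//   free list satisfies request -> HeapObject::cast(result)
//   old-gen GC limit reached    -> NULL (caller should trigger a GC)
//   Expand() succeeds           -> AllocateInNextPage()
//   otherwise                   -> NULL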


// Add the block at the top of the page to the space's free list, set the
// allocation info to the next page (assumed to exist), and allocate
// linearly there.
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  // Add the block at the top of this page to the free list.
  int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
  if (free_size > 0) {
    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
    accounting_stats_.WasteBytes(wasted_bytes);
  }
  SetAllocationInfo(&allocation_info_, current_page->next_page());
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}
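
// For example (assumed numbers): if allocation_info_.top sits 36 bytes
// short of the current page's ObjectAreaEnd(), that 36-byte tail joins the
// free list before linear allocation resumes at the start of the next
// page's object area.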


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void OldSpace::Verify() {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      // Unless this is the last page in the space containing allocated
      // objects, the allocation top should be at the object area end.
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      } else {
        ASSERT(top == current_page->ObjectAreaEnd());
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // The object should not be a map.
        ASSERT(!object->IsMap());

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap and have
        // their remembered set bits set if they point to new space. Code
        // objects do not have remembered set bits that we care about.
        VerifyPointersAndRSetVisitor rset_visitor;
        VerifyPointersVisitor no_rset_visitor;
        int size = object->Size();
        if (object->IsCode()) {
          Code::cast(object)->ConvertICTargetsFromAddressToObject();
          object->IterateBody(map->instance_type(), size, &no_rset_visitor);
          Code::cast(object)->ConvertICTargetsFromObjectToAddress();
        } else {
          object->IterateBody(map->instance_type(), size, &rset_visitor);
        }

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}


struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
};


// Must be small, since an iteration is used for lookup.
const int kMaxComments = 64;
static CommentStatistic comments_statistics[kMaxComments+1];
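
// The extra slot at index kMaxComments is the overflow bucket: it is
// labeled "Unknown" by ResetCodeStatistics() and collects every comment
// that neither matches an existing entry nor finds a free slot (see
// EnterComment below).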


void PagedSpace::ReportCodeStatistics() {
  ReportCodeKindStatistics();
  PrintF("Code comment statistics (\" [ comment-txt : size/ "
         "count (average)\"):\n");
  for (int i = 0; i <= kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}
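
// Example output line (hypothetical comment name and numbers):
//   LoadIC slow                   :      12000/   300 (40)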


void PagedSpace::ResetCodeStatistics() {
  ClearCodeKindStatistics();
  for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
  comments_statistics[kMaxComments].comment = "Unknown";
  comments_statistics[kMaxComments].size = 0;
  comments_statistics[kMaxComments].count = 0;
}


// Adds a comment to the 'comments_statistics' table. Performance is OK as
// long as 'kMaxComments' is small.
static void EnterComment(const char* comment, int delta) {
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to result.
  for (int i = 0; i < kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update entry for 'comment'.
  cs->size += delta;
  cs->count += 1;
}
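
// Usage sketch: EnterComment("abc", 10) followed by EnterComment("abc", 30)
// leaves a single entry with size == 40 and count == 2, which
// ReportCodeStatistics() prints as an average of 20 bytes per occurrence.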


// Called for each nested comment start (start marked with '[ xxx', end
// marked with ']'). RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for end of nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore we
    // will eventually exit this loop.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += it->rinfo()->pc() - prev_pc;
      if (txt[0] == ']') break;  // End of nested comment.
      // A new comment.
      CollectCommentStatistics(it);
      // Skip code that was covered with previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(comment_txt, flat_delta);
}
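
// Shape of the comment stream this function walks (a sketch of the assumed
// assembler output, not data taken from this file):
//
//   [ outer        <- this invocation starts here
//     ...code...
//     [ inner      <- handled by the recursive call
//       ...code...
//     ]
//   ]              <- terminates this invocation
//
// Code already covered by "inner" is skipped via prev_pc, so each
// instruction byte is attributed to exactly one comment.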


// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  HeapObjectIterator obj_it(this);
  while (obj_it.has_next()) {
    HeapObject* obj = obj_it.next();
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += it.rinfo()->pc() - prev_pc;
          CollectCommentStatistics(&it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->relocation_start());
      delta += code->relocation_start() - prev_pc;
      EnterComment("NoComment", delta);
    }
  }
}
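
// Bytes between comments (and after the last comment) accumulate in
// 'delta' and are booked under the synthetic "NoComment" entry, so the
// per-comment sizes plus "NoComment" should cover each code object's
// instruction area (implied by the ASSERT above, not separately checked).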


void OldSpace::ReportStatistics() {
  int pct = Available() * 100 / Capacity();
  PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  // Report remembered set statistics.
  int rset_marked_pointers = 0;
  int rset_marked_arrays = 0;
  int rset_marked_array_elements = 0;
  int cross_gen_pointers = 0;
  int cross_gen_array_elements = 0;

  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
  while (page_it.has_next()) {
    Page* p = page_it.next();

    for (Address rset_addr = p->RSetStart();
         rset_addr < p->RSetEnd();
         rset_addr += kIntSize) {
      int rset = Memory::int_at(rset_addr);
      if (rset != 0) {
        // Bits were set.
        int intoff = rset_addr - p->address();
        int bitoff = 0;
        for (; bitoff < kBitsPerInt; ++bitoff) {
          if ((rset & (1 << bitoff)) != 0) {
            int bitpos = intoff*kBitsPerByte + bitoff;
            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
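            // Worked example (assuming 4-byte rset words and
            // kObjectAlignmentBits == 2): bit 3 of the word at byte offset
            // 40 gives bitpos = 40 * 8 + 3 = 323, i.e. the slot at byte
            // offset 323 << 2 == 1292 from the page start.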
            Object** obj = reinterpret_cast<Object**>(slot);
            if (*obj == Heap::fixed_array_map()) {
              rset_marked_arrays++;
              FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));

              rset_marked_array_elements += fa->length();
              // Manually inline FixedArray::IterateBody.
              Address elm_start = slot + FixedArray::kHeaderSize;
              Address elm_stop = elm_start + fa->length() * kPointerSize;
              for (Address elm_addr = elm_start;
                   elm_addr < elm_stop; elm_addr += kPointerSize) {
                // Filter non-heap-object pointers.
                Object** elm_p = reinterpret_cast<Object**>(elm_addr);
                if (Heap::InNewSpace(*elm_p))
                  cross_gen_array_elements++;
              }
            } else {
              rset_marked_pointers++;
              if (Heap::InNewSpace(*obj))
                cross_gen_pointers++;
            }
          }
        }
      }
    }
  }

  pct = rset_marked_pointers == 0 ?
            0 : cross_gen_pointers * 100 / rset_marked_pointers;
  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
         rset_marked_pointers, cross_gen_pointers, pct);
  PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
  PrintF(" elements %d, ", rset_marked_array_elements);
  pct = rset_marked_array_elements == 0 ? 0
            : cross_gen_array_elements * 100 / rset_marked_array_elements;
  PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
  PrintF(" total rset-marked bits %d\n",
         (rset_marked_pointers + rset_marked_arrays));
  pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
        : (cross_gen_pointers + cross_gen_array_elements) * 100 /
          (rset_marked_pointers + rset_marked_array_elements);
  PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
         (rset_marked_pointers + rset_marked_array_elements),
         (cross_gen_pointers + cross_gen_array_elements),
         pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
  ReportHistogram(true);
}


// Dump the range of remembered set words between [start, end) corresponding
// to the pointers starting at object_p. The allocation_top is an object
// pointer which should not be read past. This is important for large object
// pages, where some bits in the remembered set range do not correspond to
// allocated addresses.
static void PrintRSetRange(Address start, Address end, Object** object_p,
                           Address allocation_top) {
  Address rset_address = start;

  // If the range starts on an odd-numbered word (e.g., for large object
  // extra remembered set ranges), print some spaces.
  if ((reinterpret_cast<uint32_t>(start) / kIntSize) % 2 == 1) {
    PrintF(" ");
  }

  // Loop over all the words in the range.
  while (rset_address < end) {
    uint32_t rset_word = Memory::uint32_at(rset_address);
    int bit_position = 0;

    // Loop over all the bits in the word.
    while (bit_position < kBitsPerInt) {
      if (object_p == reinterpret_cast<Object**>(allocation_top)) {
        // Print a bar at the allocation pointer.
        PrintF("|");
      } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
        // Do not dereference object_p past the allocation pointer.
        PrintF("#");
      } else if ((rset_word & (1 << bit_position)) == 0) {
        // Print a dot for zero bits.
        PrintF(".");
      } else if (Heap::InNewSpace(*object_p)) {
        // Print an X for one bits for pointers to new space.
        PrintF("X");
      } else {
        // Print a circle for one bits for pointers to old space.
        PrintF("o");
      }

      // Print a space after every 8th bit except the last.
      if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
        PrintF(" ");
      }

      // Advance to next bit.
      bit_position++;
      object_p++;
    }

    // Print a newline after every odd numbered word, otherwise a space.
    if ((reinterpret_cast<uint32_t>(rset_address) / kIntSize) % 2 == 1) {
      PrintF("\n");
    } else {
      PrintF(" ");
    }

    // Advance to next remembered set word.
    rset_address += kIntSize;
  }
}
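
// A sample dump row might read "........ ..X..o.. |####### ########"
// (hypothetical): '.' is a clear bit, 'X' a set bit whose slot points to
// new space, 'o' a set bit whose slot points to old space, '|' the
// allocation top, and '#' a position past the allocation top that must
// not be dereferenced.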


void PagedSpace::DoPrintRSet(const char* space_name) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    PrintF("%s page 0x%x:\n", space_name, p);
    PrintRSetRange(p->RSetStart(), p->RSetEnd(),
                   reinterpret_cast<Object**>(p->ObjectAreaStart()),
                   p->AllocationTop());
    PrintF("\n");
  }
}


void OldSpace::PrintRSet() { DoPrintRSet("old"); }
#endif

// -----------------------------------------------------------------------------
// MapSpace implementation

void MapSpace::PrepareForMarkCompact(bool will_compact) {
  if (will_compact) {
    // Reset relocation info.
    MCResetRelocationInfo();

    // Initialize map index entry.
    int page_count = 0;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (it.has_next()) {
      ASSERT_MAP_PAGE_INDEX(page_count);

      Page* p = it.next();
      ASSERT(p->mc_page_index == page_count);

      page_addresses_[page_count++] = p->address();
    }

    // During a compacting collection, everything in the space is considered
    // 'available' (set by the call to MCResetRelocationInfo) and we will
    // rediscover live and wasted bytes during the collection.
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer except wasted top-of-page blocks is considered
    // allocated and we will rediscover available bytes during the
    // collection.
    accounting_stats_.AllocateBytes(free_list_.available());
  }

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


void MapSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet wasted any space.
  ASSERT(Waste() == 0);

  // Update allocation_top of each page in use and compute waste.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* page = it.next();
    Address page_top = page->AllocationTop();
    computed_size += page_top - page->ObjectAreaStart();
    if (it.has_next()) {
      accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
    }
  }

  // Make sure the computed size - based on the used portion of the
  // pages in use - matches the size we adjust during allocation.
  ASSERT(computed_size == Size());
}


// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed.
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation. The
  // map space free list implicitly assumes that all free blocks are map
  // sized.
  if (size_in_bytes == Map::kSize) {
    Object* result = free_list_.Allocate();
    if (!result->IsFailure()) {
      accounting_stats_.AllocateBytes(size_in_bytes);
      return HeapObject::cast(result);
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}
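
// Note that only map-sized requests consult the map space free list; any
// other size (this space also admits byte arrays, see MapSpace::Verify)
// falls through from the linear path straight to expansion (a reading of
// the code above, not a separately documented guarantee).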


// Move to the next page (there is assumed to be one) and allocate there.
// The top of page block is always wasted, because it is too small to hold a
// map.
HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
  accounting_stats_.WasteBytes(kPageExtra);
  SetAllocationInfo(&allocation_info_, current_page->next_page());
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}
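
// Sketch of the invariant asserted above: the page's object area is not an
// exact multiple of Map::kSize, so every filled map page ends with the same
// fixed remainder of kPageExtra bytes that can never hold a map (assuming
// kPageExtra is defined as that remainder); it is charged as waste here.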


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void MapSpace::Verify() {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      // Unless this is the last page in the space containing allocated
      // objects, the allocation top should be at a constant offset from the
      // object area end.
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      } else {
        ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // The object should be a map or a byte array.
        ASSERT(object->IsMap() || object->IsByteArray());

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap and
        // have their remembered set bits set if they point to new space.
        VerifyPointersAndRSetVisitor visitor;
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, &visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}


void MapSpace::ReportStatistics() {
  int pct = Available() * 100 / Capacity();
  PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  // Report remembered set statistics.
  int rset_marked_pointers = 0;
  int cross_gen_pointers = 0;

  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
  while (page_it.has_next()) {
    Page* p = page_it.next();

    for (Address rset_addr = p->RSetStart();
         rset_addr < p->RSetEnd();
         rset_addr += kIntSize) {
      int rset = Memory::int_at(rset_addr);
      if (rset != 0) {
        // Bits were set.
        int intoff = rset_addr - p->address();
        int bitoff = 0;
        for (; bitoff < kBitsPerInt; ++bitoff) {
          if ((rset & (1 << bitoff)) != 0) {
            int bitpos = intoff*kBitsPerByte + bitoff;
            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
            Object** obj = reinterpret_cast<Object**>(slot);
            rset_marked_pointers++;
            if (Heap::InNewSpace(*obj))
              cross_gen_pointers++;
          }
        }
      }
    }
  }

  pct = rset_marked_pointers == 0 ?
            0 : cross_gen_pointers * 100 / rset_marked_pointers;
  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
         rset_marked_pointers, cross_gen_pointers, pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
  ReportHistogram(false);
}


void MapSpace::PrintRSet() { DoPrintRSet("map"); }
#endif


// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_chunk_;
  size_func_ = NULL;
}


LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func) {
  current_ = space->first_chunk_;
  size_func_ = size_func;
}


HeapObject* LargeObjectIterator::next() {
  ASSERT(has_next());
  HeapObject* object = current_->GetObject();
  current_ = current_->next();
  return object;
}


// -----------------------------------------------------------------------------
// LargeObjectChunk

LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                        size_t* chunk_size,
                                        Executability executable) {
  size_t requested = ChunkSizeFor(size_in_bytes);
  void* mem = MemoryAllocator::AllocateRawMemory(requested,
                                                 chunk_size,
                                                 executable);
  if (mem == NULL) return NULL;
  LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
  if (*chunk_size < requested) {
    MemoryAllocator::FreeRawMemory(mem, *chunk_size);
    LOG(DeleteEvent("LargeObjectChunk", mem));
    return NULL;
  }
  return reinterpret_cast<LargeObjectChunk*>(mem);
}


int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
  int os_alignment = OS::AllocateAlignment();
  if (os_alignment < Page::kPageSize)
    size_in_bytes += (Page::kPageSize - os_alignment);
  return size_in_bytes + Page::kObjectStartOffset;
}
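
// For example (assumed values): with Page::kPageSize == 8K and an OS
// allocation alignment of 4K, ChunkSizeFor pads the request by 4K so a
// page-aligned start can be found inside the chunk by rounding up, then
// adds Page::kObjectStartOffset for the page header.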


// -----------------------------------------------------------------------------
// LargeObjectSpace

LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
    : Space(id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_chunk_(NULL),
      size_(0),
      page_count_(0) {}


bool LargeObjectSpace::Setup() {
  first_chunk_ = NULL;
  size_ = 0;
  page_count_ = 0;
  return true;
}


void LargeObjectSpace::TearDown() {
  while (first_chunk_ != NULL) {
    LargeObjectChunk* chunk = first_chunk_;
    first_chunk_ = first_chunk_->next();
    LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
    MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
  }

  size_ = 0;
  page_count_ = 0;
}


#ifdef ENABLE_HEAP_PROTECTION

void LargeObjectSpace::Protect() {
  LargeObjectChunk* chunk = first_chunk_;
  while (chunk != NULL) {
    MemoryAllocator::Protect(chunk->address(), chunk->size());
    chunk = chunk->next();
  }
}


void LargeObjectSpace::Unprotect() {
  LargeObjectChunk* chunk = first_chunk_;
  while (chunk != NULL) {
    bool is_code = chunk->GetObject()->IsCode();
    MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
                               is_code ? EXECUTABLE : NOT_EXECUTABLE);
    chunk = chunk->next();
  }
}

#endif


Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                              int object_size,
                                              Executability executable) {
  ASSERT(0 < object_size && object_size <= requested_size);

  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(requested_size, identity());
  }

  size_t chunk_size;
  LargeObjectChunk* chunk =
      LargeObjectChunk::New(requested_size, &chunk_size, executable);
  if (chunk == NULL) {
    return Failure::RetryAfterGC(requested_size, identity());
  }

  size_ += chunk_size;
  page_count_++;
  chunk->set_next(first_chunk_);
  chunk->set_size(chunk_size);
  first_chunk_ = chunk;

  // Set the object address and size in the page header and clear its
  // remembered set.
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  Address object_address = page->ObjectAreaStart();
  // Clear the low order bit of the second word in the page to flag it as a
  // large object page. If the chunk_size happened to be written there, its
  // low order bit should already be clear.
  ASSERT((chunk_size & 0x1) == 0);
  page->is_normal_page &= ~0x1;
  page->ClearRSet();
  int extra_bytes = requested_size - object_size;
  if (extra_bytes > 0) {
    // The extra memory for the remembered set should be cleared.
    memset(object_address + object_size, 0, extra_bytes);
  }

  return HeapObject::FromAddress(object_address);
}
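
// The slack between requested_size and object_size is how large fixed
// arrays get their extra remembered set: AllocateRawFixedArray (below)
// requests ExtraRSetBytesFor(size) additional bytes, which land directly
// after the object and are zeroed above.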


Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             EXECUTABLE);
}


Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
  return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


// GC support
Object* LargeObjectSpace::FindObject(Address a) {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= a && a < chunk_address + chunk->size()) {
      return chunk->GetObject();
    }
  }
  return Failure::Exception();
}


void LargeObjectSpace::ClearRSet() {
  ASSERT(Page::is_rset_in_use());

  LargeObjectIterator it(this);
  while (it.has_next()) {
    HeapObject* object = it.next();
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays need remembered set support.
    if (object->IsFixedArray()) {
      // Clear the normal remembered set region of the page.
      Page* page = Page::FromAddress(object->address());
      page->ClearRSet();

      // Clear the extra remembered set.
      int size = object->Size();
      int extra_rset_bytes = ExtraRSetBytesFor(size);
      memset(object->address() + size, 0, extra_rset_bytes);
    }
  }
}


void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
  ASSERT(Page::is_rset_in_use());

  LargeObjectIterator it(this);
  while (it.has_next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    HeapObject* object = it.next();
    if (object->IsFixedArray()) {
      // Iterate the normal page remembered set range.
      Page* page = Page::FromAddress(object->address());
      Address object_end = object->address() + object->Size();
      Heap::IterateRSetRange(page->ObjectAreaStart(),
                             Min(page->ObjectAreaEnd(), object_end),
                             page->RSetStart(),
                             copy_object_func);

      // Iterate the extra array elements.
      if (object_end > page->ObjectAreaEnd()) {
        Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
                               object_end, copy_object_func);
      }
    }
  }
}
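
// Remembered set layout for a large fixed array (sketch):
//
//   | page header + normal rset | object area ... | extra rset bits |
//
// The normal rset covers element slots inside the page's object area;
// slots past ObjectAreaEnd() are covered by the extra bits stored right
// after the object (see AllocateRawFixedArray and ClearRSet above).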


void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      if (object->IsCode()) {
        LOG(CodeDeleteEvent(object->address()));
      }
      size_ -= chunk_size;
      page_count_--;
      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}


bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}


#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // We have only code, sequential strings, fixed arrays, and byte arrays
    // in large object space.
    ASSERT(object->IsCode() || object->IsSeqString()
           || object->IsFixedArray() || object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      Code::cast(object)->ConvertICTargetsFromAddressToObject();
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
      Code::cast(object)->ConvertICTargetsFromObjectToAddress();
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather than using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRSetSet.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(Heap::Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (Heap::InNewSpace(element_object)) {
            ASSERT(Page::IsRSetSet(object->address(),
                   FixedArray::kHeaderSize + j * kPointerSize));
          }
        }
      }
    }
  }
}


void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  while (it.has_next()) {
    it.next()->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  while (it.has_next()) {
    num_objects++;
    CollectHistogramInfo(it.next());
  }

  PrintF(" number of objects %d\n", num_objects);
  if (num_objects > 0) ReportHistogram(false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  LargeObjectIterator obj_it(this);
  while (obj_it.has_next()) {
    HeapObject* obj = obj_it.next();
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
    }
  }
}


void LargeObjectSpace::PrintRSet() {
  LargeObjectIterator it(this);
  while (it.has_next()) {
    HeapObject* object = it.next();
    if (object->IsFixedArray()) {
      Page* page = Page::FromAddress(object->address());

      Address allocation_top = object->address() + object->Size();
      PrintF("large page 0x%x:\n", page);
      PrintRSetRange(page->RSetStart(), page->RSetEnd(),
                     reinterpret_cast<Object**>(object->address()),
                     allocation_top);
      int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
      int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
                                    kBitsPerInt);
      PrintF("------------------------------------------------------------"
             "-----------\n");
      PrintRSetRange(allocation_top,
                     allocation_top + extra_rset_bits / kBitsPerByte,
                     reinterpret_cast<Object**>(object->address()
                                                + Page::kObjectAreaSize),
                     allocation_top);
      PrintF("\n");
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal