// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "liveobjectlist-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the double linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the double linked page list. Initialize the current
  // address and end as NULL, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  ASSERT(owner == HEAP->old_pointer_space() ||
         owner == HEAP->old_data_space() ||
         owner == HEAP->map_space() ||
         owner == HEAP->cell_space() ||
         owner == HEAP->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner),
             page->area_start(),
             page->area_end(),
             kOnePageOnly,
             size_func);
  ASSERT(page->WasSweptPrecisely());
}


void HeapObjectIterator::Initialize(PagedSpace* space,
                                    Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
  // Check that we actually can iterate this space.
  ASSERT(!space->was_swept_conservatively());

  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
  size_func_ = size_f;
}


// We have hit the end of the current page's object area and should advance
// to the next page.
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  ASSERT(cur_page->WasSweptPrecisely());
  return true;
}


// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {
}


bool CodeRange::SetUp(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  Address base = reinterpret_cast<Address>(code_range_->address());
  Address aligned_base =
      RoundUp(reinterpret_cast<Address>(code_range_->address()),
              MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


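// Finds an allocation block with at least |requested| bytes of space left,
// first by scanning the rest of the allocation list and, if that fails, by
// sorting and merging the free list back into the allocation list. Aborts
// with an out-of-memory error if the code range is full or too fragmented.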
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}


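// Carves |requested| bytes (rounded up to MemoryChunk::kAlignment) out of the
// current allocation block, commits them, and advances to the next block once
// the current one is used up.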
Address CodeRange::AllocateRawMemory(const size_t requested,
                                     size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough. This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  ASSERT(*allocated <= current.size);
  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!MemoryAllocator::CommitCodePage(code_range_,
                                       current.start,
                                       *allocated)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0) {
}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  ASSERT(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // ASSERT(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(reservation->IsReserved());
  size_t size = reservation->size();
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  ASSERT(!isolate_->code_range()->contains(
      static_cast<Address>(reservation->address())));
  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base,
                                 size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
    ASSERT(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
    bool result = VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    ASSERT(result);
  }
}


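// Reserves (but does not commit) |size| bytes of address space aligned to
// |alignment| and transfers ownership of the reservation to |controller|.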
Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                              size_t alignment,
                                              VirtualMemory* controller) {
  VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_ += reservation.size();
  Address base = RoundUp(static_cast<Address>(reservation.address()),
                         alignment);
  controller->TakeControl(&reservation);
  return base;
}


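// Reserves and commits an aligned block of memory. Executable blocks are
// committed through CommitCodePage so that they get the code-page layout
// with guard pages.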
Address MemoryAllocator::AllocateAlignedMemory(size_t size,
                                               size_t alignment,
                                               Executability executable,
                                               VirtualMemory* controller) {
  VirtualMemory reservation;
  Address base = ReserveAlignedMemory(size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    CommitCodePage(&reservation, base, size);
  } else {
    if (!reservation.Commit(base,
                            size,
                            executable == EXECUTABLE)) {
      return NULL;
    }
  }

  controller->TakeControl(&reservation);
  return base;
}


void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                       Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                               start,
                                               Page::kPageSize,
                                               area_start,
                                               area_end,
                                               NOT_EXECUTABLE,
                                               semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // Clearing all the flags marks this invalid page as not being in
  // new-space. All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}


MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                     Address base,
                                     size_t size,
                                     Address area_start,
                                     Address area_end,
                                     Executability executable,
                                     Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  ASSERT(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT_PRECISELY);

  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (owner == heap->old_data_space()) {
    chunk->SetFlag(CONTAINS_ONLY_DATA);
  }

  return chunk;
}


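// Memory chunks are kept on a circular doubly-linked list; the two helpers
// below splice a chunk into the list right after |other| and remove it again.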
void MemoryChunk::InsertAfter(MemoryChunk* other) {
  next_chunk_ = other->next_chunk_;
  prev_chunk_ = other;
  other->next_chunk_->prev_chunk_ = this;
  other->next_chunk_ = this;
}


void MemoryChunk::Unlink() {
  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
    heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  next_chunk_->prev_chunk_ = prev_chunk_;
  prev_chunk_->next_chunk_ = next_chunk_;
  prev_chunk_ = NULL;
  next_chunk_ = NULL;
}


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
                                            Executability executable,
                                            Space* owner) {
  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;
  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
                         OS::CommitPageSize()) + CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_,
          StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range()->exists()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                       MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size,
                                   MemoryChunk::kAlignment,
                                   executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

#ifdef DEBUG
    ZapBlock(base, CodePageGuardStartOffset());
    ZapBlock(base + CodePageAreaStartOffset(), body_size);
#endif
    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + body_size;
  } else {
    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
    base = AllocateAlignedMemory(chunk_size,
                                 MemoryChunk::kAlignment,
                                 executable,
                                 &reservation);

    if (base == NULL) return NULL;

#ifdef DEBUG
    ZapBlock(base, chunk_size);
#endif

    area_start = base + Page::kObjectStartOffset;
    area_end = base + chunk_size;
  }

  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                base,
                                                chunk_size,
                                                area_start,
                                                area_end,
                                                executable,
                                                owner);
  result->set_reserved_memory(&reservation);
  return result;
}


Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
                                     executable,
                                     owner);

  if (chunk == NULL) return NULL;

  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Executability executable,
                                              Space* owner) {
  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}


void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(
      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(),
               chunk->size(),
               chunk->executable());
  }
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


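// Fills a block with the zap value, one pointer-sized word at a time, so that
// stale references into freed or uncommitted memory are easier to recognize;
// its callers are guarded by #ifdef DEBUG.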
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


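// Code pages are laid out as follows (see CommitCodePage):
//
//   | page header | guard page | executable area | guard page |
//
// The helpers below express this layout as byte offsets rounded to the OS
// commit page size.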
int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
}


bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
                                     Address start,
                                     size_t size) {
  // Commit page header (not executable).
  if (!vm->Commit(start,
                  CodePageGuardStartOffset(),
                  false)) {
    return false;
  }

  // Create guard page after the header.
  if (!vm->Guard(start + CodePageGuardStartOffset())) {
    return false;
  }

  // Commit page body (executable).
  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
  if (!vm->Commit(start + CodePageAreaStartOffset(),
                  area_size,
                  true)) {
    return false;
  }

  // Create guard page after the allocatable area.
  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
    return false;
  }

  return true;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

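// Adjusts the live byte count of the page containing |address| on behalf of
// the mutator, keeping the unswept-free-bytes accounting of the owning paged
// space consistent for pages that have not been swept yet.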
void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}

// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable),
      free_list_(this),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {
  if (id == CODE_SPACE) {
    area_size_ = heap->isolate()->memory_allocator()->
        CodePageAreaSize();
  } else {
    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
  }
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
      * AreaSize();
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() {
  return true;
}


bool PagedSpace::HasBeenSetUp() {
  return true;
}


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on precisely swept spaces.
  ASSERT(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Failure::Exception();
}

bool PagedSpace::CanExpand() {
  ASSERT(max_capacity_ % AreaSize() == 0);
  ASSERT(Capacity() % AreaSize() == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);

  // Are we going to exceed capacity for this space?
  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;

  return true;
}

bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  Page* p = heap()->isolate()->memory_allocator()->
      AllocatePage(this, executable());
  if (p == NULL) return false;

  ASSERT(Capacity() <= max_capacity_);

  p->InsertAfter(anchor_.prev_page());

  return true;
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


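// Detaches |page| from this space, fixing up any sweeping and allocation
// state that still refers to it, and either frees the memory immediately
// (for data-only pages) or queues the chunk for deferred release.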
void PagedSpace::ReleasePage(Page* page) {
  ASSERT(page->LiveBytes() == 0);
  ASSERT(AreaSize() == page->area_size());

  // Adjust list of unswept pages if the page is the head of the list.
  if (first_unswept_page_ == page) {
    first_unswept_page_ = page->next_page();
    if (first_unswept_page_ == anchor()) {
      first_unswept_page_ = Page::FromAddress(NULL);
    }
  }

  if (page->WasSwept()) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    ASSERT_EQ(AreaSize(), static_cast<int>(size));
  } else {
    DecreaseUnsweptFreeBytes(page);
  }

  if (Page::FromAllocationTop(allocation_info_.top) == page) {
    allocation_info_.top = allocation_info_.limit = NULL;
  }

  page->Unlink();
  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
    heap()->isolate()->memory_allocator()->Free(page);
  } else {
    heap()->QueueMemoryChunkForFree(page);
  }

  ASSERT(Capacity() > 0);
  ASSERT(Capacity() % AreaSize() == 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}


void PagedSpace::ReleaseAllUnusedPages() {
  PageIterator it(this);
  while (it.has_next()) {
    Page* page = it.next();
    if (!page->WasSwept()) {
      if (page->LiveBytes() == 0) ReleasePage(page);
    } else {
      HeapObject* obj = HeapObject::FromAddress(page->area_start());
      if (obj->IsFreeSpace() &&
          FreeSpace::cast(obj)->size() == AreaSize()) {
        // Sometimes we allocate memory from the free list but don't
        // immediately initialize it (e.g. see PagedSpace::ReserveSpace
        // called from Heap::ReserveSpace, which can cause a GC before the
        // reserved space is actually initialized). Thus we can't simply
        // assume that obj represents a valid node still owned by a free
        // list. Instead we verify that the page is fully covered by free
        // list items.
        FreeList::SizeStats sizes;
        free_list_.CountFreeListItems(page, &sizes);
        if (sizes.Total() == AreaSize()) {
          ReleasePage(page);
        }
      }
    }
  }
  heap()->FreeQueuedChunks();
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // We can only iterate over the pages if they were swept precisely.
  if (was_swept_conservatively_) return;

  bool allocation_pointer_found_in_space =
      (allocation_info_.top == allocation_info_.limit);
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    ASSERT(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top)) {
      allocation_pointer_found_in_space = true;
    }
    ASSERT(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      ASSERT(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      ASSERT(map->IsMap());
      ASSERT(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      ASSERT(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    ASSERT_LE(black_size, page->LiveBytes());
  }
  ASSERT(allocation_pointer_found_in_space);
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base =
      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
          size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  ASSERT(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_,
                  initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));

  ASSERT(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
  chunk_base_ = NULL;
  chunk_size_ = 0;
}


void NewSpace::Flip() {
  SemiSpace::Swap(&from_space_, &to_space_);
}


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  ASSERT(Capacity() < MaximumCapacity());
  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.page_high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


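// Resets top and limit to the bounds of the current to-space page. While
// incremental marking is active the limit is lowered so that allocation
// returns to the slow path at regular intervals and a marking step can be
// performed (see SlowAllocateRaw).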
void NewSpace::UpdateAllocationInfo() {
  allocation_info_.top = to_space_.page_low();
  allocation_info_.limit = to_space_.page_high();

  // Lower limit during incremental marking.
  if (heap()->incremental_marking()->IsMarking() &&
      inline_allocation_limit_step() != 0) {
    Address new_limit =
        allocation_info_.top + inline_allocation_limit_step();
    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  to_space_.Reset();
  UpdateAllocationInfo();
  pages_used_ = 0;
  // Clear all mark-bits in the to-space.
  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


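// Advances new-space allocation to the next to-space page. Fails if the
// current page is still empty (the requested allocation is too large for a
// single page) or if to-space has no more pages.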
bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top;
  if (NewSpacePage::IsAtStart(top)) {
    // The current page is already empty. Don't try to make another.

    // We should only get here if someone asks to allocate more
    // than what can be stored in a single page.
    // TODO(gc): Change the limit on new-space allocation to prevent this
    // from happening (all such allocations should go directly to LOSpace).
    return false;
  }
  if (!to_space_.AdvancePage()) {
    // Failed to get a new page in to-space.
    return false;
  }

  // Clear remainder of current page.
  Address limit = NewSpacePage::FromLimit(top)->area_end();
  if (heap()->gc_state() == Heap::SCAVENGE) {
    heap()->promotion_queue()->SetNewLimit(limit);
    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
  }

  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page);
  pages_used_++;
  UpdateAllocationInfo();

  return true;
}


MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top;
  Address new_top = old_top + size_in_bytes;
  Address high = to_space_.page_high();
  if (allocation_info_.limit < high) {
    // Incremental marking has lowered the limit to get a
    // chance to do a step.
    allocation_info_.limit = Min(
        allocation_info_.limit + inline_allocation_limit_step_,
        high);
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(bytes_allocated);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);
  } else if (AddFreshPage()) {
    // Switched to new page. Try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(bytes_allocated);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
  } else {
    return Failure::RetryAfterGC();
  }
}


#ifdef DEBUG
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!NewSpacePage::IsAtEnd(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsCode());

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
      // Next page should be valid.
      CHECK(!page->is_anchor());
      current = page->area_start();
    }
  }

  // Check semi-spaces.
  ASSERT_EQ(from_space_.id(), kFromSpace);
  ASSERT_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif

// -----------------------------------------------------------------------------
// SemiSpace implementation

void SemiSpace::SetUp(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise. In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  ASSERT(maximum_capacity >= Page::kPageSize);
  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  capacity_ = initial_capacity;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  committed_ = false;
  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  int pages = capacity_ / Page::kPageSize;
  Address end = start_ + maximum_capacity_;
  Address start = end - pages * Page::kPageSize;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
                                                          capacity_,
                                                          executable())) {
    return false;
  }

  NewSpacePage* page = anchor();
  for (int i = 1; i <= pages; i++) {
    NewSpacePage* new_page =
        NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
    new_page->InsertAfter(page);
    page = new_page;
  }

  committed_ = true;
  Reset();
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
    return false;
  }
  anchor()->set_next_page(anchor());
  anchor()->set_prev_page(anchor());

  committed_ = false;
  return true;
}


bool SemiSpace::GrowTo(int new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  int pages_before = capacity_ / Page::kPageSize;
  int pages_after = new_capacity / Page::kPageSize;

  Address end = start_ + maximum_capacity_;
  Address start = end - new_capacity;
  size_t delta = new_capacity - capacity_;

  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      start, delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  NewSpacePage* last_page = anchor()->prev_page();
  ASSERT(last_page != anchor());
  for (int i = pages_before + 1; i <= pages_after; i++) {
    Address page_address = end - i * Page::kPageSize;
    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
                                                      page_address,
                                                      this);
    new_page->InsertAfter(last_page);
    Bitmap::Clear(new_page);
    // Duplicate the flags that were set on the old page.
1372 new_page->SetFlags(last_page->GetFlags(),
1373 NewSpacePage::kCopyOnFlipFlagsMask);
1374 last_page = new_page;
1375 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001376 return true;
1377}
1378
1379
1380bool SemiSpace::ShrinkTo(int new_capacity) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001381 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001382 ASSERT(new_capacity >= initial_capacity_);
1383 ASSERT(new_capacity < capacity_);
Ben Murdochc7cc0282012-03-05 14:35:55 +00001384 if (is_committed()) {
1385 // Semispaces grow backwards from the end of their allocated capacity,
1386 // so we find the before and after start addresses relative to the
1387 // end of the space.
1388 Address space_end = start_ + maximum_capacity_;
1389 Address old_start = space_end - capacity_;
1390 size_t delta = capacity_ - new_capacity;
1391 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001392
Ben Murdochc7cc0282012-03-05 14:35:55 +00001393 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1394 if (!allocator->UncommitBlock(old_start, delta)) {
1395 return false;
1396 }
1397
1398 int pages_after = new_capacity / Page::kPageSize;
1399 NewSpacePage* new_last_page =
1400 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
1401 new_last_page->set_next_page(anchor());
1402 anchor()->set_prev_page(new_last_page);
1403 ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
1404 }
1405
1406 capacity_ = new_capacity;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001407
Steve Blocka7e24c12009-10-30 11:49:00 +00001408 return true;
1409}
1410
1411
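// Makes every page in the page list point back at this semispace, copies the
// flag bits selected by mask from flags onto each page, and switches the
// IN_FROM_SPACE/IN_TO_SPACE bits to match the space's new role.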
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001412void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1413 anchor_.set_owner(this);
1414 // Fixup back-pointers to anchor. Address of anchor changes
1415 // when we swap.
1416 anchor_.prev_page()->set_next_page(&anchor_);
1417 anchor_.next_page()->set_prev_page(&anchor_);
1418
1419 bool becomes_to_space = (id_ == kFromSpace);
1420 id_ = becomes_to_space ? kToSpace : kFromSpace;
1421 NewSpacePage* page = anchor_.next_page();
1422 while (page != &anchor_) {
1423 page->set_owner(this);
1424 page->SetFlags(flags, mask);
1425 if (becomes_to_space) {
1426 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1427 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1428 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1429 page->ResetLiveBytes();
1430 } else {
1431 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1432 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1433 }
1434 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1435 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1436 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1437 page = page->next_page();
1438 }
1439}
1440
1441
1442void SemiSpace::Reset() {
1443 ASSERT(anchor_.next_page() != &anchor_);
1444 current_page_ = anchor_.next_page();
1445}
1446
1447
1448void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1449 // We won't be swapping semispaces without data in them.
1450 ASSERT(from->anchor_.next_page() != &from->anchor_);
1451 ASSERT(to->anchor_.next_page() != &to->anchor_);
1452
1453 // Swap bits.
1454 SemiSpace tmp = *from;
1455 *from = *to;
1456 *to = tmp;
1457
1458 // Fixup back-pointers to the page list anchor now that its address
1459 // has changed.
1460 // Swap to/from-space bits on pages.
1461 // Copy GC flags from old active space (from-space) to new (to-space).
1462 intptr_t flags = from->current_page()->GetFlags();
1463 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1464
1465 from->FlipPages(0, 0);
1466}
1467
1468
1469void SemiSpace::set_age_mark(Address mark) {
1470 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1471 age_mark_ = mark;
1472 // Mark all pages up to the one containing mark.
1473 NewSpacePageIterator it(space_start(), mark);
1474 while (it.has_next()) {
1475 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1476 }
1477}
1478
1479
Steve Blocka7e24c12009-10-30 11:49:00 +00001480#ifdef DEBUG
1481void SemiSpace::Print() { }
1482
1483
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001484void SemiSpace::Verify() {
1485 bool is_from_space = (id_ == kFromSpace);
1486 NewSpacePage* page = anchor_.next_page();
1487 CHECK(anchor_.semi_space() == this);
1488 while (page != &anchor_) {
1489 CHECK(page->semi_space() == this);
1490 CHECK(page->InNewSpace());
1491 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1492 : MemoryChunk::IN_TO_SPACE));
1493 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1494 : MemoryChunk::IN_FROM_SPACE));
1495 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1496 if (!is_from_space) {
1497 // The pointers-from-here-are-interesting flag isn't updated dynamically
1498 // on from-space pages, so it might be out of sync with the marking state.
1499 if (page->heap()->incremental_marking()->IsMarking()) {
1500 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1501 } else {
1502 CHECK(!page->IsFlagSet(
1503 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1504 }
1505 // TODO(gc): Check that the live_bytes_count_ field matches the
1506 // black marking on the page (if we make it match in new-space).
1507 }
1508 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1509 CHECK(page->prev_page()->next_page() == page);
1510 page = page->next_page();
1511 }
1512}
1513
1514
1515void SemiSpace::AssertValidRange(Address start, Address end) {
 1516 // Addresses belong to the same semi-space.
1517 NewSpacePage* page = NewSpacePage::FromLimit(start);
1518 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1519 SemiSpace* space = page->semi_space();
1520 CHECK_EQ(space, end_page->semi_space());
1521 // Start address is before end address, either on same page,
1522 // or end address is on a later page in the linked list of
1523 // semi-space pages.
1524 if (page == end_page) {
1525 CHECK(start <= end);
1526 } else {
1527 while (page != end_page) {
1528 page = page->next_page();
1529 CHECK_NE(page, space->anchor());
1530 }
1531 }
1532}
Steve Blocka7e24c12009-10-30 11:49:00 +00001533#endif
1534
1535
1536// -----------------------------------------------------------------------------
1537// SemiSpaceIterator implementation.
1538SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001539 Initialize(space->bottom(), space->top(), NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001540}
1541
1542
1543SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1544 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001545 Initialize(space->bottom(), space->top(), size_func);
Steve Blocka7e24c12009-10-30 11:49:00 +00001546}
1547
1548
1549SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001550 Initialize(start, space->top(), NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001551}
1552
1553
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001554SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1555 Initialize(from, to, NULL);
1556}
1557
1558
1559void SemiSpaceIterator::Initialize(Address start,
Steve Blocka7e24c12009-10-30 11:49:00 +00001560 Address end,
1561 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001562 SemiSpace::AssertValidRange(start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00001563 current_ = start;
1564 limit_ = end;
1565 size_func_ = size_func;
1566}
1567
1568
1569#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00001570// heap_histograms is shared, always clear it before using it.
1571static void ClearHistograms() {
Steve Block44f0eee2011-05-26 01:26:41 +01001572 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001573 // We reset the name each time, though it hasn't changed.
Steve Block44f0eee2011-05-26 01:26:41 +01001574#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
Steve Blocka7e24c12009-10-30 11:49:00 +00001575 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1576#undef DEF_TYPE_NAME
1577
Steve Block44f0eee2011-05-26 01:26:41 +01001578#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00001579 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1580#undef CLEAR_HISTOGRAM
1581
Steve Block44f0eee2011-05-26 01:26:41 +01001582 isolate->js_spill_information()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00001583}
1584
1585
Steve Blocka7e24c12009-10-30 11:49:00 +00001586static void ClearCodeKindStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01001587 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001588 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001589 isolate->code_kind_statistics()[i] = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001590 }
1591}
1592
1593
1594static void ReportCodeKindStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01001595 Isolate* isolate = Isolate::Current();
Steve Block6ded16b2010-05-10 14:33:55 +01001596 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
Steve Blocka7e24c12009-10-30 11:49:00 +00001597
1598#define CASE(name) \
1599 case Code::name: table[Code::name] = #name; \
1600 break
1601
1602 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1603 switch (static_cast<Code::Kind>(i)) {
1604 CASE(FUNCTION);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001605 CASE(OPTIMIZED_FUNCTION);
Steve Blocka7e24c12009-10-30 11:49:00 +00001606 CASE(STUB);
1607 CASE(BUILTIN);
1608 CASE(LOAD_IC);
1609 CASE(KEYED_LOAD_IC);
1610 CASE(STORE_IC);
1611 CASE(KEYED_STORE_IC);
1612 CASE(CALL_IC);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001613 CASE(KEYED_CALL_IC);
Ben Murdoch257744e2011-11-30 15:57:28 +00001614 CASE(UNARY_OP_IC);
1615 CASE(BINARY_OP_IC);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001616 CASE(COMPARE_IC);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001617 CASE(TO_BOOLEAN_IC);
Steve Blocka7e24c12009-10-30 11:49:00 +00001618 }
1619 }
1620
1621#undef CASE
1622
1623 PrintF("\n Code kind histograms: \n");
1624 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001625 if (isolate->code_kind_statistics()[i] > 0) {
1626 PrintF(" %-20s: %10d bytes\n", table[i],
1627 isolate->code_kind_statistics()[i]);
Steve Blocka7e24c12009-10-30 11:49:00 +00001628 }
1629 }
1630 PrintF("\n");
1631}
1632
1633
1634static int CollectHistogramInfo(HeapObject* obj) {
Steve Block44f0eee2011-05-26 01:26:41 +01001635 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001636 InstanceType type = obj->map()->instance_type();
1637 ASSERT(0 <= type && type <= LAST_TYPE);
Steve Block44f0eee2011-05-26 01:26:41 +01001638 ASSERT(isolate->heap_histograms()[type].name() != NULL);
1639 isolate->heap_histograms()[type].increment_number(1);
1640 isolate->heap_histograms()[type].increment_bytes(obj->Size());
Steve Blocka7e24c12009-10-30 11:49:00 +00001641
1642 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
Steve Block44f0eee2011-05-26 01:26:41 +01001643 JSObject::cast(obj)->IncrementSpillStatistics(
1644 isolate->js_spill_information());
Steve Blocka7e24c12009-10-30 11:49:00 +00001645 }
1646
1647 return obj->Size();
1648}
1649
1650
1651static void ReportHistogram(bool print_spill) {
Steve Block44f0eee2011-05-26 01:26:41 +01001652 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001653 PrintF("\n Object Histogram:\n");
1654 for (int i = 0; i <= LAST_TYPE; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001655 if (isolate->heap_histograms()[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001656 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Block44f0eee2011-05-26 01:26:41 +01001657 isolate->heap_histograms()[i].name(),
1658 isolate->heap_histograms()[i].number(),
1659 isolate->heap_histograms()[i].bytes());
Steve Blocka7e24c12009-10-30 11:49:00 +00001660 }
1661 }
1662 PrintF("\n");
1663
1664 // Summarize string types.
1665 int string_number = 0;
1666 int string_bytes = 0;
1667#define INCREMENT(type, size, name, camel_name) \
Steve Block44f0eee2011-05-26 01:26:41 +01001668 string_number += isolate->heap_histograms()[type].number(); \
1669 string_bytes += isolate->heap_histograms()[type].bytes();
Steve Blocka7e24c12009-10-30 11:49:00 +00001670 STRING_TYPE_LIST(INCREMENT)
1671#undef INCREMENT
1672 if (string_number > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001673 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
Steve Blocka7e24c12009-10-30 11:49:00 +00001674 string_bytes);
1675 }
1676
1677 if (FLAG_collect_heap_spill_statistics && print_spill) {
Steve Block44f0eee2011-05-26 01:26:41 +01001678 isolate->js_spill_information()->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00001679 }
1680}
1681#endif // DEBUG
1682
1683
1684// Support for statistics gathering for --heap-stats and --log-gc.
Steve Blocka7e24c12009-10-30 11:49:00 +00001685void NewSpace::ClearHistograms() {
1686 for (int i = 0; i <= LAST_TYPE; i++) {
1687 allocated_histogram_[i].clear();
1688 promoted_histogram_[i].clear();
1689 }
1690}
1691
1692// Because the copying collector does not touch garbage objects, we iterate
1693// the new space before a collection to get a histogram of allocated objects.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001694// This only happens when the --log-gc flag is set.
Steve Blocka7e24c12009-10-30 11:49:00 +00001695void NewSpace::CollectStatistics() {
1696 ClearHistograms();
1697 SemiSpaceIterator it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001698 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
Leon Clarked91b9f72010-01-27 17:25:45 +00001699 RecordAllocation(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001700}
1701
1702
Steve Block44f0eee2011-05-26 01:26:41 +01001703static void DoReportStatistics(Isolate* isolate,
1704 HistogramInfo* info, const char* description) {
1705 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
Steve Blocka7e24c12009-10-30 11:49:00 +00001706 // Lump all the string types together.
1707 int string_number = 0;
1708 int string_bytes = 0;
1709#define INCREMENT(type, size, name, camel_name) \
1710 string_number += info[type].number(); \
1711 string_bytes += info[type].bytes();
1712 STRING_TYPE_LIST(INCREMENT)
1713#undef INCREMENT
1714 if (string_number > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01001715 LOG(isolate,
1716 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00001717 }
1718
1719 // Then do the other types.
1720 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1721 if (info[i].number() > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01001722 LOG(isolate,
1723 HeapSampleItemEvent(info[i].name(), info[i].number(),
Steve Blocka7e24c12009-10-30 11:49:00 +00001724 info[i].bytes()));
1725 }
1726 }
Steve Block44f0eee2011-05-26 01:26:41 +01001727 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
Steve Blocka7e24c12009-10-30 11:49:00 +00001728}
Steve Blocka7e24c12009-10-30 11:49:00 +00001729
1730
1731void NewSpace::ReportStatistics() {
1732#ifdef DEBUG
1733 if (FLAG_heap_stats) {
1734 float pct = static_cast<float>(Available()) / Capacity();
Ben Murdochf87a2032010-10-22 12:50:53 +01001735 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1736 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001737 Capacity(), Available(), static_cast<int>(pct*100));
1738 PrintF("\n Object Histogram:\n");
1739 for (int i = 0; i <= LAST_TYPE; i++) {
1740 if (allocated_histogram_[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001741 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001742 allocated_histogram_[i].name(),
1743 allocated_histogram_[i].number(),
1744 allocated_histogram_[i].bytes());
1745 }
1746 }
1747 PrintF("\n");
1748 }
1749#endif // DEBUG
1750
Steve Blocka7e24c12009-10-30 11:49:00 +00001751 if (FLAG_log_gc) {
Steve Block44f0eee2011-05-26 01:26:41 +01001752 Isolate* isolate = ISOLATE;
1753 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1754 DoReportStatistics(isolate, promoted_histogram_, "promoted");
Steve Blocka7e24c12009-10-30 11:49:00 +00001755 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001756}
1757
1758
1759void NewSpace::RecordAllocation(HeapObject* obj) {
1760 InstanceType type = obj->map()->instance_type();
1761 ASSERT(0 <= type && type <= LAST_TYPE);
1762 allocated_histogram_[type].increment_number(1);
1763 allocated_histogram_[type].increment_bytes(obj->Size());
1764}
1765
1766
1767void NewSpace::RecordPromotion(HeapObject* obj) {
1768 InstanceType type = obj->map()->instance_type();
1769 ASSERT(0 <= type && type <= LAST_TYPE);
1770 promoted_histogram_[type].increment_number(1);
1771 promoted_histogram_[type].increment_bytes(obj->Size());
1772}
Steve Blocka7e24c12009-10-30 11:49:00 +00001773
Steve Blocka7e24c12009-10-30 11:49:00 +00001774// -----------------------------------------------------------------------------
1775// Free lists for old object spaces implementation
1776
Steve Block44f0eee2011-05-26 01:26:41 +01001777void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001778 ASSERT(size_in_bytes > 0);
1779 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1780
1781 // We write a map and possibly size information to the block. If the block
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001782 // is big enough to be a FreeSpace with at least one extra word (the next
1783 // pointer), we set its map to be the free space map and its size to an
Steve Blocka7e24c12009-10-30 11:49:00 +00001784 // appropriate array length for the desired size from HeapObject::Size().
 1785 // If the block is too small (e.g., one or two words) to hold both a size
1786 // field and a next pointer, we give it a filler map that gives it the
1787 // correct size.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001788 if (size_in_bytes > FreeSpace::kHeaderSize) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00001789 set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001790 // Can't use FreeSpace::cast because it fails during deserialization.
1791 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1792 this_as_free_space->set_size(size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001793 } else if (size_in_bytes == kPointerSize) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00001794 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00001795 } else if (size_in_bytes == 2 * kPointerSize) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00001796 set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00001797 } else {
1798 UNREACHABLE();
1799 }
Steve Blockd0582a62009-12-15 09:54:21 +00001800 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001801 // deserialization because the free space map is not done yet.
Steve Blocka7e24c12009-10-30 11:49:00 +00001802}
1803
1804
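// A free block stores its next pointer at kNextOffset when it carries the
// free space map (and therefore a size field); one- and two-pointer filler
// blocks keep the next pointer directly after the map word instead.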
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001805FreeListNode* FreeListNode::next() {
Steve Block3ce2e202009-11-05 08:53:23 +00001806 ASSERT(IsFreeListNode(this));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001807 if (map() == HEAP->raw_unchecked_free_space_map()) {
1808 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1809 return reinterpret_cast<FreeListNode*>(
1810 Memory::Address_at(address() + kNextOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00001811 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001812 return reinterpret_cast<FreeListNode*>(
1813 Memory::Address_at(address() + kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00001814 }
1815}
1816
1817
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001818FreeListNode** FreeListNode::next_address() {
Steve Block3ce2e202009-11-05 08:53:23 +00001819 ASSERT(IsFreeListNode(this));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001820 if (map() == HEAP->raw_unchecked_free_space_map()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001821 ASSERT(Size() >= kNextOffset + kPointerSize);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001822 return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
Steve Blocka7e24c12009-10-30 11:49:00 +00001823 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001824 return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00001825 }
1826}
1827
1828
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001829void FreeListNode::set_next(FreeListNode* next) {
1830 ASSERT(IsFreeListNode(this));
1831 // While we are booting the VM the free space map will actually be null. So
1832 // we have to make sure that we don't try to use it for anything at that
1833 // stage.
1834 if (map() == HEAP->raw_unchecked_free_space_map()) {
1835 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1836 Memory::Address_at(address() + kNextOffset) =
1837 reinterpret_cast<Address>(next);
1838 } else {
1839 Memory::Address_at(address() + kPointerSize) =
1840 reinterpret_cast<Address>(next);
1841 }
1842}
1843
1844
1845FreeList::FreeList(PagedSpace* owner)
1846 : owner_(owner), heap_(owner->heap()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001847 Reset();
1848}
1849
1850
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001851void FreeList::Reset() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001852 available_ = 0;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001853 small_list_ = NULL;
1854 medium_list_ = NULL;
1855 large_list_ = NULL;
1856 huge_list_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001857}
1858
1859
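// Adds the block [start, start + size_in_bytes) to the free list bucket that
// matches its size. Blocks smaller than kSmallListMin are dropped and their
// size is returned as wasted bytes; otherwise the function returns 0.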
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001860int FreeList::Free(Address start, int size_in_bytes) {
1861 if (size_in_bytes == 0) return 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001862 FreeListNode* node = FreeListNode::FromAddress(start);
Steve Block44f0eee2011-05-26 01:26:41 +01001863 node->set_size(heap_, size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001864
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001865 // Early return to drop too-small blocks on the floor.
1866 if (size_in_bytes < kSmallListMin) return size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +00001867
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001868 // Insert other blocks at the head of a free list of the appropriate
1869 // magnitude.
1870 if (size_in_bytes <= kSmallListMax) {
1871 node->set_next(small_list_);
1872 small_list_ = node;
1873 } else if (size_in_bytes <= kMediumListMax) {
1874 node->set_next(medium_list_);
1875 medium_list_ = node;
1876 } else if (size_in_bytes <= kLargeListMax) {
1877 node->set_next(large_list_);
1878 large_list_ = node;
1879 } else {
1880 node->set_next(huge_list_);
1881 huge_list_ = node;
Steve Blocka7e24c12009-10-30 11:49:00 +00001882 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001883 available_ += size_in_bytes;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001884 ASSERT(IsVeryLong() || available_ == SumFreeLists());
Steve Blocka7e24c12009-10-30 11:49:00 +00001885 return 0;
1886}
1887
1888
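// Takes the first node of *list that does not lie on an evacuation candidate
// page. Nodes that are skipped over are dropped from the list and their bytes
// are subtracted from available_.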
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001889FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1890 FreeListNode* node = *list;
Steve Blocka7e24c12009-10-30 11:49:00 +00001891
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001892 if (node == NULL) return NULL;
1893
1894 while (node != NULL &&
1895 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1896 available_ -= node->Size();
1897 node = node->next();
Steve Blocka7e24c12009-10-30 11:49:00 +00001898 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001899
1900 if (node != NULL) {
1901 *node_size = node->Size();
1902 *list = node->next();
Steve Blocka7e24c12009-10-30 11:49:00 +00001903 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001904 *list = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001905 }
1906
Steve Blocka7e24c12009-10-30 11:49:00 +00001907 return node;
1908}
1909
1910
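// Searches the size-class lists in increasing order for a node of at least
// size_in_bytes. The huge list is walked node by node because its entries
// have arbitrary sizes and may also lie on evacuation candidate pages.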
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001911FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1912 FreeListNode* node = NULL;
1913
1914 if (size_in_bytes <= kSmallAllocationMax) {
1915 node = PickNodeFromList(&small_list_, node_size);
1916 if (node != NULL) return node;
1917 }
1918
1919 if (size_in_bytes <= kMediumAllocationMax) {
1920 node = PickNodeFromList(&medium_list_, node_size);
1921 if (node != NULL) return node;
1922 }
1923
1924 if (size_in_bytes <= kLargeAllocationMax) {
1925 node = PickNodeFromList(&large_list_, node_size);
1926 if (node != NULL) return node;
1927 }
1928
1929 for (FreeListNode** cur = &huge_list_;
1930 *cur != NULL;
1931 cur = (*cur)->next_address()) {
1932 FreeListNode* cur_node = *cur;
1933 while (cur_node != NULL &&
1934 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1935 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1936 cur_node = cur_node->next();
1937 }
1938
1939 *cur = cur_node;
1940 if (cur_node == NULL) break;
1941
1942 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1943 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1944 int size = cur_as_free_space->Size();
1945 if (size >= size_in_bytes) {
1946 // Large enough node found. Unlink it from the list.
1947 node = *cur;
1948 *node_size = size;
1949 *cur = node->next();
1950 break;
1951 }
1952 }
1953
1954 return node;
1955}
1956
1957
1958// Allocation on the old space free list. If it succeeds then a new linear
1959// allocation space has been set up with the top and limit of the space. If
1960// the allocation fails then NULL is returned, and the caller can perform a GC
1961// or allocate a new page before retrying.
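// A typical caller tries the free list first and then falls back to the slow
// path, as PagedSpace::ReserveSpace does later in this file:
//   HeapObject* new_area = free_list_.Allocate(size_in_bytes);
//   if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);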
1962HeapObject* FreeList::Allocate(int size_in_bytes) {
1963 ASSERT(0 < size_in_bytes);
1964 ASSERT(size_in_bytes <= kMaxBlockSize);
1965 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1966 // Don't free list allocate if there is linear space available.
1967 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1968
1969 int new_node_size = 0;
1970 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
1971 if (new_node == NULL) return NULL;
1972
1973 available_ -= new_node_size;
1974 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1975
1976 int bytes_left = new_node_size - size_in_bytes;
1977 ASSERT(bytes_left >= 0);
1978
1979 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
1980 // Mark the old linear allocation area with a free space map so it can be
1981 // skipped when scanning the heap. This also puts it back in the free list
1982 // if it is big enough.
1983 owner_->Free(owner_->top(), old_linear_size);
1984
1985#ifdef DEBUG
1986 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
1987 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
1988 }
1989#endif
1990
1991 owner_->heap()->incremental_marking()->OldSpaceStep(
1992 size_in_bytes - old_linear_size);
1993
1994 // The old-space-step might have finished sweeping and restarted marking.
1995 // Verify that it did not turn the page of the new node into an evacuation
1996 // candidate.
1997 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
1998
1999 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2000
2001 // Memory in the linear allocation area is counted as allocated. We may free
2002 // a little of this again immediately - see below.
2003 owner_->Allocate(new_node_size);
2004
2005 if (bytes_left > kThreshold &&
2006 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2007 FLAG_incremental_marking_steps) {
2008 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2009 // We don't want to give too large linear areas to the allocator while
2010 // incremental marking is going on, because we won't check again whether
2011 // we want to do another increment until the linear area is used up.
2012 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2013 new_node_size - size_in_bytes - linear_size);
2014 owner_->SetTop(new_node->address() + size_in_bytes,
2015 new_node->address() + size_in_bytes + linear_size);
2016 } else if (bytes_left > 0) {
2017 // Normally we give the rest of the node to the allocator as its new
2018 // linear allocation area.
2019 owner_->SetTop(new_node->address() + size_in_bytes,
2020 new_node->address() + new_node_size);
2021 } else {
 2022 // TODO(gc) Try not to free the linear allocation region when bytes_left
 2023 // is zero.
2024 owner_->SetTop(NULL, NULL);
2025 }
2026
2027 return new_node;
2028}
2029
2030
2031static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
2032 intptr_t sum = 0;
2033 while (n != NULL) {
2034 if (Page::FromAddress(n->address()) == p) {
2035 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
2036 sum += free_space->Size();
2037 }
2038 n = n->next();
2039 }
2040 return sum;
2041}
2042
2043
2044void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
2045 sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
2046 if (sizes->huge_size_ < p->area_size()) {
2047 sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
2048 sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
2049 sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
2050 } else {
2051 sizes->small_size_ = 0;
2052 sizes->medium_size_ = 0;
2053 sizes->large_size_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002054 }
2055}
2056
2057
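// Unlinks every node that lives on page p from the list and returns the total
// number of bytes those nodes covered.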
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002058static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
2059 intptr_t sum = 0;
2060 while (*n != NULL) {
2061 if (Page::FromAddress((*n)->address()) == p) {
2062 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2063 sum += free_space->Size();
2064 *n = (*n)->next();
2065 } else {
2066 n = (*n)->next_address();
2067 }
2068 }
2069 return sum;
2070}
2071
2072
2073intptr_t FreeList::EvictFreeListItems(Page* p) {
2074 intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
2075
2076 if (sum < p->area_size()) {
2077 sum += EvictFreeListItemsInList(&small_list_, p) +
2078 EvictFreeListItemsInList(&medium_list_, p) +
2079 EvictFreeListItemsInList(&large_list_, p);
2080 }
2081
2082 available_ -= static_cast<int>(sum);
2083
2084 return sum;
2085}
2086
2087
2088#ifdef DEBUG
2089intptr_t FreeList::SumFreeList(FreeListNode* cur) {
2090 intptr_t sum = 0;
2091 while (cur != NULL) {
2092 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
2093 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2094 sum += cur_as_free_space->Size();
2095 cur = cur->next();
2096 }
2097 return sum;
2098}
2099
2100
2101static const int kVeryLongFreeList = 500;
2102
2103
2104int FreeList::FreeListLength(FreeListNode* cur) {
2105 int length = 0;
2106 while (cur != NULL) {
2107 length++;
2108 cur = cur->next();
2109 if (length == kVeryLongFreeList) return length;
2110 }
2111 return length;
2112}
2113
2114
2115bool FreeList::IsVeryLong() {
2116 if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
2117 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
2118 if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
2119 if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
2120 return false;
2121}
2122
2123
2124// This can take a very long time because it is linear in the number of entries
2125// on the free list, so it should not be called if FreeListLength returns
2126// kVeryLongFreeList.
2127intptr_t FreeList::SumFreeLists() {
2128 intptr_t sum = SumFreeList(small_list_);
2129 sum += SumFreeList(medium_list_);
2130 sum += SumFreeList(large_list_);
2131 sum += SumFreeList(huge_list_);
2132 return sum;
2133}
2134#endif
2135
2136
Steve Blocka7e24c12009-10-30 11:49:00 +00002137// -----------------------------------------------------------------------------
2138// OldSpace implementation
2139
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002140bool NewSpace::ReserveSpace(int bytes) {
2141 // We can't reliably unpack a partial snapshot that needs more new space
 2142 // than the minimum NewSpace size. The limit can be set lower than
2143 // the end of new space either because there is more space on the next page
2144 // or because we have lowered the limit in order to get periodic incremental
2145 // marking. The most reliable way to ensure that there is linear space is
2146 // to do the allocation, then rewind the limit.
2147 ASSERT(bytes <= InitialCapacity());
2148 MaybeObject* maybe = AllocateRaw(bytes);
2149 Object* object = NULL;
2150 if (!maybe->ToObject(&object)) return false;
2151 HeapObject* allocation = HeapObject::cast(object);
2152 Address top = allocation_info_.top;
2153 if ((top - bytes) == allocation->address()) {
2154 allocation_info_.top = allocation->address();
2155 return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00002156 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002157 // There may be a borderline case here where the allocation succeeded, but
2158 // the limit and top have moved on to a new page. In that case we try again.
2159 return ReserveSpace(bytes);
2160}
2161
2162
2163void PagedSpace::PrepareForMarkCompact() {
2164 // We don't have a linear allocation area while sweeping. It will be restored
2165 // on the first allocation after the sweep.
2166 // Mark the old linear allocation area with a free space map so it can be
2167 // skipped when scanning the heap.
2168 int old_linear_size = static_cast<int>(limit() - top());
2169 Free(top(), old_linear_size);
2170 SetTop(NULL, NULL);
2171
2172 // Stop lazy sweeping and clear marking bits for unswept pages.
2173 if (first_unswept_page_ != NULL) {
2174 Page* p = first_unswept_page_;
2175 do {
2176 // Do not use ShouldBeSweptLazily predicate here.
2177 // New evacuation candidates were selected but they still have
2178 // to be swept before collection starts.
2179 if (!p->WasSwept()) {
2180 Bitmap::Clear(p);
2181 if (FLAG_gc_verbose) {
2182 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
2183 reinterpret_cast<intptr_t>(p));
2184 }
2185 }
2186 p = p->next_page();
2187 } while (p != anchor());
2188 }
2189 first_unswept_page_ = Page::FromAddress(NULL);
Ben Murdochc7cc0282012-03-05 14:35:55 +00002190 unswept_free_bytes_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002191
2192 // Clear the free list before a full GC---it will be rebuilt afterward.
2193 free_list_.Reset();
2194}
2195
2196
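// Ensures that size_in_bytes of contiguous allocation space is available:
// first from the current linear area, then from the free list, and finally
// via SlowAllocateRaw (lazy sweeping or expansion). On success the linear
// allocation area is repositioned over the newly acquired block.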
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002197bool PagedSpace::ReserveSpace(int size_in_bytes) {
2198 ASSERT(size_in_bytes <= AreaSize());
2199 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
2200 Address current_top = allocation_info_.top;
2201 Address new_top = current_top + size_in_bytes;
2202 if (new_top <= allocation_info_.limit) return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00002203
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002204 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2205 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2206 if (new_area == NULL) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002207
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002208 int old_linear_size = static_cast<int>(limit() - top());
2209 // Mark the old linear allocation area with a free space so it can be
2210 // skipped when scanning the heap. This also puts it back in the free list
2211 // if it is big enough.
2212 Free(top(), old_linear_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002213
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002214 SetTop(new_area->address(), new_area->address() + size_in_bytes);
2215 Allocate(size_in_bytes);
Leon Clarkee46be812010-01-19 14:06:41 +00002216 return true;
2217}
2218
2219
2220// You have to call this last, since the implementation from PagedSpace
2221// doesn't know that memory was 'promised' to large object space.
2222bool LargeObjectSpace::ReserveSpace(int bytes) {
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01002223 return heap()->OldGenerationCapacityAvailable() >= bytes &&
2224 (!heap()->incremental_marking()->IsStopped() ||
2225 heap()->OldGenerationSpaceAvailable() >= bytes);
Leon Clarkee46be812010-01-19 14:06:41 +00002226}
2227
2228
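// Conservatively sweeps unswept pages until at least bytes_to_sweep bytes
// have been freed or every unswept page has been processed. Returns whether
// lazy sweeping is now complete.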
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002229bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2230 if (IsSweepingComplete()) return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00002231
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002232 intptr_t freed_bytes = 0;
2233 Page* p = first_unswept_page_;
2234 do {
2235 Page* next_page = p->next_page();
2236 if (ShouldBeSweptLazily(p)) {
2237 if (FLAG_gc_verbose) {
2238 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
2239 reinterpret_cast<intptr_t>(p));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002240 }
Ben Murdochc7cc0282012-03-05 14:35:55 +00002241 DecreaseUnsweptFreeBytes(p);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002242 freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
Steve Blockd0582a62009-12-15 09:54:21 +00002243 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002244 p = next_page;
2245 } while (p != anchor() && freed_bytes < bytes_to_sweep);
2246
2247 if (p == anchor()) {
2248 first_unswept_page_ = Page::FromAddress(NULL);
2249 } else {
2250 first_unswept_page_ = p;
Steve Blocka7e24c12009-10-30 11:49:00 +00002251 }
2252
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002253 heap()->LowerOldGenLimits(freed_bytes);
2254
2255 heap()->FreeQueuedChunks();
2256
2257 return IsSweepingComplete();
2258}
2259
2260
2261void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2262 if (allocation_info_.top >= allocation_info_.limit) return;
2263
2264 if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
2265 // Create filler object to keep page iterable if it was iterable.
2266 int remaining =
2267 static_cast<int>(allocation_info_.limit - allocation_info_.top);
2268 heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2269
2270 allocation_info_.top = NULL;
2271 allocation_info_.limit = NULL;
2272 }
2273}
2274
2275
2276HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2277 // Allocation in this space has failed.
2278
Ben Murdochc7cc0282012-03-05 14:35:55 +00002279 // If there are unswept pages, advance the lazy sweeper and retry the free
 2280 // list allocation before allocating a new page.
2281 if (first_unswept_page_->is_valid()) {
2282 AdvanceSweeper(size_in_bytes);
2283
2284 // Retry the free list allocation.
2285 HeapObject* object = free_list_.Allocate(size_in_bytes);
2286 if (object != NULL) return object;
2287 }
2288
Steve Blocka7e24c12009-10-30 11:49:00 +00002289 // Free list allocation failed and there is no next page. Fail if we have
2290 // hit the old generation size limit that should cause a garbage
2291 // collection.
Steve Block44f0eee2011-05-26 01:26:41 +01002292 if (!heap()->always_allocate() &&
2293 heap()->OldGenerationAllocationLimitReached()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002294 return NULL;
2295 }
2296
Ben Murdochc7cc0282012-03-05 14:35:55 +00002297 // Try to expand the space and allocate in the new next page.
2298 if (Expand()) {
2299 return free_list_.Allocate(size_in_bytes);
2300 }
2301
 2302 // Last ditch: sweep all the remaining pages to try to find space. This may
2303 // cause a pause.
2304 if (!IsSweepingComplete()) {
2305 AdvanceSweeper(kMaxInt);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002306
2307 // Retry the free list allocation.
2308 HeapObject* object = free_list_.Allocate(size_in_bytes);
2309 if (object != NULL) return object;
Steve Blocka7e24c12009-10-30 11:49:00 +00002310 }
2311
2312 // Finally, fail.
2313 return NULL;
2314}
2315
2316
Steve Blocka7e24c12009-10-30 11:49:00 +00002317#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00002318void PagedSpace::ReportCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002319 Isolate* isolate = Isolate::Current();
2320 CommentStatistic* comments_statistics =
2321 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002322 ReportCodeKindStatistics();
2323 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2324 "count (average)\"):\n");
Steve Block44f0eee2011-05-26 01:26:41 +01002325 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002326 const CommentStatistic& cs = comments_statistics[i];
2327 if (cs.size > 0) {
2328 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2329 cs.size/cs.count);
2330 }
2331 }
2332 PrintF("\n");
2333}
2334
2335
2336void PagedSpace::ResetCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002337 Isolate* isolate = Isolate::Current();
2338 CommentStatistic* comments_statistics =
2339 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002340 ClearCodeKindStatistics();
Steve Block44f0eee2011-05-26 01:26:41 +01002341 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2342 comments_statistics[i].Clear();
2343 }
2344 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2345 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2346 comments_statistics[CommentStatistic::kMaxComments].count = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002347}
2348
2349
Steve Block44f0eee2011-05-26 01:26:41 +01002350// Adds a comment to the 'comments_statistics' table. Performance is OK as
Steve Blocka7e24c12009-10-30 11:49:00 +00002351// long as 'kMaxComments' is small.
Steve Block44f0eee2011-05-26 01:26:41 +01002352static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2353 CommentStatistic* comments_statistics =
2354 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002355 // Do not count empty comments
2356 if (delta <= 0) return;
Steve Block44f0eee2011-05-26 01:26:41 +01002357 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
Steve Blocka7e24c12009-10-30 11:49:00 +00002358 // Search for a free or matching entry in 'comments_statistics': 'cs'
2359 // points to result.
Steve Block44f0eee2011-05-26 01:26:41 +01002360 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002361 if (comments_statistics[i].comment == NULL) {
2362 cs = &comments_statistics[i];
2363 cs->comment = comment;
2364 break;
2365 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2366 cs = &comments_statistics[i];
2367 break;
2368 }
2369 }
2370 // Update entry for 'comment'
2371 cs->size += delta;
2372 cs->count += 1;
2373}
2374
2375
2376// Call for each nested comment start (start marked with '[ xxx', end marked
 2377 // with ']'). RelocIterator 'it' must point to a comment reloc info.
Steve Block44f0eee2011-05-26 01:26:41 +01002378static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002379 ASSERT(!it->done());
2380 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2381 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2382 if (tmp[0] != '[') {
2383 // Not a nested comment; skip
2384 return;
2385 }
2386
2387 // Search for end of nested comment or a new nested comment
2388 const char* const comment_txt =
2389 reinterpret_cast<const char*>(it->rinfo()->data());
2390 const byte* prev_pc = it->rinfo()->pc();
2391 int flat_delta = 0;
2392 it->next();
2393 while (true) {
2394 // All nested comments must be terminated properly, and therefore exit
2395 // from loop.
2396 ASSERT(!it->done());
2397 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2398 const char* const txt =
2399 reinterpret_cast<const char*>(it->rinfo()->data());
Steve Blockd0582a62009-12-15 09:54:21 +00002400 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002401 if (txt[0] == ']') break; // End of nested comment
2402 // A new comment
Steve Block44f0eee2011-05-26 01:26:41 +01002403 CollectCommentStatistics(isolate, it);
Steve Blocka7e24c12009-10-30 11:49:00 +00002404 // Skip code that was covered with previous comment
2405 prev_pc = it->rinfo()->pc();
2406 }
2407 it->next();
2408 }
Steve Block44f0eee2011-05-26 01:26:41 +01002409 EnterComment(isolate, comment_txt, flat_delta);
Steve Blocka7e24c12009-10-30 11:49:00 +00002410}
2411
2412
2413// Collects code size statistics:
2414// - by code kind
2415// - by code comment
2416void PagedSpace::CollectCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002417 Isolate* isolate = heap()->isolate();
Steve Blocka7e24c12009-10-30 11:49:00 +00002418 HeapObjectIterator obj_it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002419 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002420 if (obj->IsCode()) {
2421 Code* code = Code::cast(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01002422 isolate->code_kind_statistics()[code->kind()] += code->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00002423 RelocIterator it(code);
2424 int delta = 0;
2425 const byte* prev_pc = code->instruction_start();
2426 while (!it.done()) {
2427 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
Steve Blockd0582a62009-12-15 09:54:21 +00002428 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
Steve Block44f0eee2011-05-26 01:26:41 +01002429 CollectCommentStatistics(isolate, &it);
Steve Blocka7e24c12009-10-30 11:49:00 +00002430 prev_pc = it.rinfo()->pc();
2431 }
2432 it.next();
2433 }
2434
2435 ASSERT(code->instruction_start() <= prev_pc &&
Leon Clarkeac952652010-07-15 11:15:24 +01002436 prev_pc <= code->instruction_end());
2437 delta += static_cast<int>(code->instruction_end() - prev_pc);
Steve Block44f0eee2011-05-26 01:26:41 +01002438 EnterComment(isolate, "NoComment", delta);
Steve Blocka7e24c12009-10-30 11:49:00 +00002439 }
2440 }
2441}
2442
2443
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002444void PagedSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002445 int pct = static_cast<int>(Available() * 100 / Capacity());
2446 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2447 ", waste: %" V8_PTR_PREFIX "d"
2448 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002449 Capacity(), Waste(), Available(), pct);
2450
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002451 if (was_swept_conservatively_) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00002452 ClearHistograms();
2453 HeapObjectIterator obj_it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002454 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
Leon Clarked91b9f72010-01-27 17:25:45 +00002455 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002456 ReportHistogram(true);
2457}
Steve Blocka7e24c12009-10-30 11:49:00 +00002458#endif
2459
2460// -----------------------------------------------------------------------------
2461// FixedSpace implementation
2462
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002463void FixedSpace::PrepareForMarkCompact() {
Steve Block6ded16b2010-05-10 14:33:55 +01002464 // Call prepare of the super class.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002465 PagedSpace::PrepareForMarkCompact();
Steve Block6ded16b2010-05-10 14:33:55 +01002466
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002467 // During a non-compacting collection, everything below the linear
2468 // allocation pointer except wasted top-of-page blocks is considered
2469 // allocated and we will rediscover available bytes during the
2470 // collection.
2471 accounting_stats_.AllocateBytes(free_list_.available());
Steve Blocka7e24c12009-10-30 11:49:00 +00002472
2473 // Clear the free list before a full GC---it will be rebuilt afterward.
2474 free_list_.Reset();
2475}
2476
2477
Steve Blocka7e24c12009-10-30 11:49:00 +00002478// -----------------------------------------------------------------------------
2479// MapSpace implementation
2480
Steve Blocka7e24c12009-10-30 11:49:00 +00002481#ifdef DEBUG
2482void MapSpace::VerifyObject(HeapObject* object) {
2483 // The object should be a map or a free-list node.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002484 ASSERT(object->IsMap() || object->IsFreeSpace());
Steve Blocka7e24c12009-10-30 11:49:00 +00002485}
2486#endif
2487
2488
2489// -----------------------------------------------------------------------------
2490// GlobalPropertyCellSpace implementation
2491
2492#ifdef DEBUG
2493void CellSpace::VerifyObject(HeapObject* object) {
2494 // The object should be a global object property cell or a free-list node.
2495 ASSERT(object->IsJSGlobalPropertyCell() ||
Steve Block44f0eee2011-05-26 01:26:41 +01002496 object->map() == heap()->two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002497}
2498#endif
2499
2500
2501// -----------------------------------------------------------------------------
2502// LargeObjectIterator
2503
2504LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002505 current_ = space->first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002506 size_func_ = NULL;
2507}
2508
2509
2510LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2511 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002512 current_ = space->first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002513 size_func_ = size_func;
2514}
2515
2516
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002517HeapObject* LargeObjectIterator::Next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002518 if (current_ == NULL) return NULL;
2519
Steve Blocka7e24c12009-10-30 11:49:00 +00002520 HeapObject* object = current_->GetObject();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002521 current_ = current_->next_page();
Steve Blocka7e24c12009-10-30 11:49:00 +00002522 return object;
2523}
2524
2525
2526// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002527// LargeObjectSpace
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01002528static bool ComparePointers(void* key1, void* key2) {
2529 return key1 == key2;
2530}
2531
Steve Blocka7e24c12009-10-30 11:49:00 +00002532
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002533LargeObjectSpace::LargeObjectSpace(Heap* heap,
2534 intptr_t max_capacity,
2535 AllocationSpace id)
Steve Block44f0eee2011-05-26 01:26:41 +01002536 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002537 max_capacity_(max_capacity),
2538 first_page_(NULL),
Steve Blocka7e24c12009-10-30 11:49:00 +00002539 size_(0),
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002540 page_count_(0),
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01002541 objects_size_(0),
2542 chunk_map_(ComparePointers, 1024) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002543
2544
Ben Murdochc7cc0282012-03-05 14:35:55 +00002545bool LargeObjectSpace::SetUp() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002546 first_page_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00002547 size_ = 0;
2548 page_count_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002549 objects_size_ = 0;
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01002550 chunk_map_.Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002551 return true;
2552}
2553
2554
2555void LargeObjectSpace::TearDown() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002556 while (first_page_ != NULL) {
2557 LargePage* page = first_page_;
2558 first_page_ = first_page_->next_page();
2559 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2560
2561 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2562 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2563 space, kAllocationActionFree, page->size());
2564 heap()->isolate()->memory_allocator()->Free(page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002565 }
Ben Murdochc7cc0282012-03-05 14:35:55 +00002566 SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002567}
2568
2569
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002570MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2571 Executability executable) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002572 // Check if we want to force a GC before growing the old space further.
2573 // If so, fail the allocation.
Steve Block44f0eee2011-05-26 01:26:41 +01002574 if (!heap()->always_allocate() &&
2575 heap()->OldGenerationAllocationLimitReached()) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002576 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002577 }
2578
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002579 if (Size() + object_size > max_capacity_) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002580 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002581 }
2582
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002583 LargePage* page = heap()->isolate()->memory_allocator()->
2584 AllocateLargePage(object_size, executable, this);
2585 if (page == NULL) return Failure::RetryAfterGC(identity());
2586 ASSERT(page->area_size() >= object_size);
2587
2588 size_ += static_cast<int>(page->size());
2589 objects_size_ += object_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002590 page_count_++;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002591 page->set_next_page(first_page_);
2592 first_page_ = page;
Steve Blocka7e24c12009-10-30 11:49:00 +00002593
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01002594 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2595 // this large page in the chunk map.
2596 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2597 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2598 for (uintptr_t key = base; key <= limit; key++) {
2599 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2600 static_cast<uint32_t>(key),
2601 true);
2602 ASSERT(entry != NULL);
2603 entry->value = page;
2604 }
2605
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002606 HeapObject* object = page->GetObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01002607
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002608#ifdef DEBUG
 2609 // Make the object consistent so the heap can be verified in OldSpaceStep.
2610 reinterpret_cast<Object**>(object->address())[0] =
2611 heap()->fixed_array_map();
2612 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2613#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002614
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002615 heap()->incremental_marking()->OldSpaceStep(object_size);
2616 return object;
Steve Blocka7e24c12009-10-30 11:49:00 +00002617}
2618
2619
2620// GC support
John Reck59135872010-11-02 12:39:01 -07002621MaybeObject* LargeObjectSpace::FindObject(Address a) {
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01002622 LargePage* page = FindPage(a);
2623 if (page != NULL) {
2624 return page->GetObject();
Steve Blocka7e24c12009-10-30 11:49:00 +00002625 }
2626 return Failure::Exception();
2627}
2628
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002629
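// Finds the large page covering address a by probing the chunk map with the
// MemoryChunk::kAlignment-aligned key for a. Returns NULL if no large page in
// this space contains the address.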
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01002630LargePage* LargeObjectSpace::FindPage(Address a) {
2631 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2632 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2633 static_cast<uint32_t>(key),
2634 false);
2635 if (e != NULL) {
2636 ASSERT(e->value != NULL);
2637 LargePage* page = reinterpret_cast<LargePage*>(e->value);
2638 ASSERT(page->is_valid());
2639 if (page->Contains(a)) {
2640 return page;
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002641 }
2642 }
2643 return NULL;
2644}
2645
2646
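// Sweeps the large object space. Pages whose object is still marked survive
// and have their mark bit cleared; unmarked pages are unlinked, removed from
// the chunk map, and released. Pages that may contain pointers (fixed arrays)
// are queued on the heap for freeing rather than being freed immediately.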
Steve Blocka7e24c12009-10-30 11:49:00 +00002647void LargeObjectSpace::FreeUnmarkedObjects() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002648 LargePage* previous = NULL;
2649 LargePage* current = first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002650 while (current != NULL) {
2651 HeapObject* object = current->GetObject();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002652 // Can this large page contain pointers to non-trivial objects? No other
2653 // pointer object is this big.
2654 bool is_pointer_object = object->IsFixedArray();
2655 MarkBit mark_bit = Marking::MarkBitFrom(object);
2656 if (mark_bit.Get()) {
2657 mark_bit.Clear();
Ben Murdochc7cc0282012-03-05 14:35:55 +00002658 MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
Steve Blocka7e24c12009-10-30 11:49:00 +00002659 previous = current;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002660 current = current->next_page();
Steve Blocka7e24c12009-10-30 11:49:00 +00002661 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002662 LargePage* page = current;
Steve Blocka7e24c12009-10-30 11:49:00 +00002663 // Cut the chunk out from the chunk list.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002664 current = current->next_page();
Steve Blocka7e24c12009-10-30 11:49:00 +00002665 if (previous == NULL) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002666 first_page_ = current;
Steve Blocka7e24c12009-10-30 11:49:00 +00002667 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002668 previous->set_next_page(current);
Steve Blocka7e24c12009-10-30 11:49:00 +00002669 }
2670
2671 // Free the chunk.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002672 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2673 object, heap()->isolate());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002674 size_ -= static_cast<int>(page->size());
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002675 objects_size_ -= object->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00002676 page_count_--;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002677
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +01002678 // Remove entries belonging to this page.
 2679 // Use a local 'alignment' variable to keep line lengths within the
 2680 // 80-character limit checked by tools/presubmit.py.
2681 const intptr_t alignment = MemoryChunk::kAlignment;
2682 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
2683 uintptr_t limit = base + (page->size()-1)/alignment;
2684 for (uintptr_t key = base; key <= limit; key++) {
2685 chunk_map_.Remove(reinterpret_cast<void*>(key),
2686 static_cast<uint32_t>(key));
2687 }
2688
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002689 if (is_pointer_object) {
2690 heap()->QueueMemoryChunkForFree(page);
2691 } else {
2692 heap()->isolate()->memory_allocator()->Free(page);
2693 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002694 }
2695 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002696 heap()->FreeQueuedChunks();
Steve Blocka7e24c12009-10-30 11:49:00 +00002697}
2698
2699
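// Illustrative sketch, not part of the original file: the key range removed
// in FreeUnmarkedObjects() above. Assuming the page starts at a
// kAlignment-aligned address (which MemoryChunk::FromAddress() in Contains()
// below relies on), a page of page->size() bytes overlaps exactly the
// kAlignment-sized slots
//
//   base  = reinterpret_cast<uintptr_t>(page) / alignment
//   limit = base + (page->size() - 1) / alignment
//
// inclusive, so the removal loop erases precisely the chunk_map_ entries that
// FindPage() could have returned for this page. For example, with a
// hypothetical 1 MB alignment, a 3.5 MB page occupies keys base..base + 3,
// since (page->size() - 1) / alignment == 3.
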
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());

  return owned;
}

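// Illustrative sketch, not part of the original file ('lo_space' and 'addr'
// are hypothetical names): Contains() above answers ownership from the chunk
// header alone, presumably by masking the address down to its chunk start in
// MemoryChunk::FromAddress(), while the SLOW_ASSERT cross-checks the result
// against the hash-map-based FindObject() path:
//
//   bool fast = (MemoryChunk::FromAddress(addr)->owner() == lo_space);
//   bool slow = !lo_space->FindObject(addr)->IsFailure();
//   // For any address inside a live large object, fast and slow agree.
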
#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(heap()->Contains(element_object));
          ASSERT(element_object->map()->IsMap());
        }
      }
    }
  }
}


void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}


void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG

} }  // namespace v8::internal