// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "liveobjectlist-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page.  It is not a real page,
  // just an anchor for the double linked page list.  Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  // You can't actually iterate over the anchor page.  It is not a real page,
  // just an anchor for the double linked page list.  Initialize the current
  // address and end as NULL, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  ASSERT(owner == HEAP->old_pointer_space() ||
         owner == HEAP->old_data_space() ||
         owner == HEAP->map_space() ||
         owner == HEAP->cell_space() ||
         owner == HEAP->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner),
             page->area_start(),
             page->area_end(),
             kOnePageOnly,
             size_func);
  ASSERT(page->WasSweptPrecisely());
}


void HeapObjectIterator::Initialize(PagedSpace* space,
                                    Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
  // Check that we actually can iterate this space.
  ASSERT(!space->was_swept_conservatively());

  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
  size_func_ = size_f;
}


// We have hit the end of the page and should advance to the next block of
// objects.  This happens at the end of the page.
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  ASSERT(cur_page->WasSweptPrecisely());
  return true;
}


// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {
}


bool CodeRange::SetUp(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  Address base = reinterpret_cast<Address>(code_range_->address());
  Address aligned_base =
      RoundUp(reinterpret_cast<Address>(code_range_->address()),
              MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}

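// Advances current_allocation_block_index_ to a free block that can hold
// |requested| bytes.  If no such block is left, the free list and the
// allocation list are merged, sorted and compacted before searching again;
// if that still fails, the process is aborted with a fatal OOM.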
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}

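// Commits at least |requested| bytes (rounded up to the chunk alignment) at
// the start of the current allocation block and returns the start address.
// If the remainder of the block would be smaller than a page it is absorbed
// into the allocation so no useless fragment is left behind.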
Address CodeRange::AllocateRawMemory(const size_t requested,
                                     size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  ASSERT(*allocated <= current.size);
  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!MemoryAllocator::CommitCodePage(code_range_,
                                       current.start,
                                       *allocated)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0) {
}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  ASSERT(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // ASSERT(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(reservation->IsReserved());
  size_t size = reservation->size();
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  ASSERT(!isolate_->code_range()->contains(
      static_cast<Address>(reservation->address())));
  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base,
                                 size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
    ASSERT(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
    bool result = VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    ASSERT(result);
  }
}


Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                              size_t alignment,
                                              VirtualMemory* controller) {
  VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_ += reservation.size();
  Address base = RoundUp(static_cast<Address>(reservation.address()),
                         alignment);
  controller->TakeControl(&reservation);
  return base;
}


Address MemoryAllocator::AllocateAlignedMemory(size_t size,
                                               size_t alignment,
                                               Executability executable,
                                               VirtualMemory* controller) {
  VirtualMemory reservation;
  Address base = ReserveAlignedMemory(size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    CommitCodePage(&reservation, base, size);
  } else {
    if (!reservation.Commit(base,
                            size,
                            executable == EXECUTABLE)) {
      return NULL;
    }
  }

  controller->TakeControl(&reservation);
  return base;
}


void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                       Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                               start,
                                               Page::kPageSize,
                                               area_start,
                                               area_end,
                                               NOT_EXECUTABLE,
                                               semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // The flags mark this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}


MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                     Address base,
                                     size_t size,
                                     Address area_start,
                                     Address area_end,
                                     Executability executable,
                                     Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  ASSERT(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT_PRECISELY);

  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (owner == heap->old_data_space()) {
    chunk->SetFlag(CONTAINS_ONLY_DATA);
  }

  return chunk;
}


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  next_chunk_ = other->next_chunk_;
  prev_chunk_ = other;
  other->next_chunk_->prev_chunk_ = this;
  other->next_chunk_ = this;
}

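// Removes the chunk from the doubly linked list it is on and, for pages
// outside new-space, drops the scan-on-scavenge page accounting.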
void MemoryChunk::Unlink() {
  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
    heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  next_chunk_->prev_chunk_ = prev_chunk_;
  prev_chunk_->next_chunk_ = next_chunk_;
  prev_chunk_ = NULL;
  next_chunk_ = NULL;
}

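// Allocates and initializes a MemoryChunk with at least |body_size| usable
// bytes.  Executable chunks are taken from the code range (if one exists)
// and get guard pages around their body via CommitCodePage; other chunks
// are committed directly from a fresh aligned reservation.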
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
                                            Executability executable,
                                            Space* owner) {
  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;
  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
                         OS::CommitPageSize()) + CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_,
          StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range()->exists()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                       MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size,
                                   MemoryChunk::kAlignment,
                                   executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

#ifdef DEBUG
    ZapBlock(base, CodePageGuardStartOffset());
    ZapBlock(base + CodePageAreaStartOffset(), body_size);
#endif
    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + body_size;
  } else {
    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
    base = AllocateAlignedMemory(chunk_size,
                                 MemoryChunk::kAlignment,
                                 executable,
                                 &reservation);

    if (base == NULL) return NULL;

#ifdef DEBUG
    ZapBlock(base, chunk_size);
#endif

    area_start = base + Page::kObjectStartOffset;
    area_end = base + chunk_size;
  }

  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                base,
                                                chunk_size,
                                                area_start,
                                                area_end,
                                                executable,
                                                owner);
  result->set_reserved_memory(&reservation);
  return result;
}


Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
                                     executable,
                                     owner);

  if (chunk == NULL) return NULL;

  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Executability executable,
                                              Space* owner) {
  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}


void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(),
               chunk->size(),
               chunk->executable());
  }
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return OS::CommitPageSize();
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - OS::CommitPageSize();
}

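// Commits a code page with the following layout: a writable, non-executable
// header, a guard page, the executable area, and a trailing guard page.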
bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
                                     Address start,
                                     size_t size) {
  // Commit page header (not executable).
  if (!vm->Commit(start,
                  CodePageGuardStartOffset(),
                  false)) {
    return false;
  }

  // Create guard page after the header.
  if (!vm->Guard(start + CodePageGuardStartOffset())) {
    return false;
  }

  // Commit page body (executable).
  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
  if (!vm->Commit(start + CodePageAreaStartOffset(),
                  area_size,
                  true)) {
    return false;
  }

  // Create guard page after the allocatable area.
  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
    return false;
  }

  return true;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}

// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable),
      free_list_(this),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {
  if (id == CODE_SPACE) {
    area_size_ = heap->isolate()->memory_allocator()->
        CodePageAreaSize();
  } else {
    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
  }
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
      * AreaSize();
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() {
  return true;
}


bool PagedSpace::HasBeenSetUp() {
  return true;
}


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on precisely swept spaces.
  ASSERT(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Failure::Exception();
}

bool PagedSpace::CanExpand() {
  ASSERT(max_capacity_ % AreaSize() == 0);
  ASSERT(Capacity() % AreaSize() == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);

  // Are we going to exceed capacity for this space?
  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;

  return true;
}

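// Allocates a new page for this space, if the capacity limit allows it, and
// links it in at the end of the page list.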
bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  Page* p = heap()->isolate()->memory_allocator()->
      AllocatePage(this, executable());
  if (p == NULL) return false;

  ASSERT(Capacity() <= max_capacity_);

  p->InsertAfter(anchor_.prev_page());

  return true;
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}

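// Detaches |page| from this space.  The page must contain no live objects.
// Free-list entries, unswept-bytes accounting and the allocation top that
// refer to the page are fixed up before it is unlinked and handed back to
// the memory allocator (or queued for later freeing).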
void PagedSpace::ReleasePage(Page* page) {
  ASSERT(page->LiveBytes() == 0);
  ASSERT(AreaSize() == page->area_size());

  // Adjust list of unswept pages if the page is the head of the list.
  if (first_unswept_page_ == page) {
    first_unswept_page_ = page->next_page();
    if (first_unswept_page_ == anchor()) {
      first_unswept_page_ = Page::FromAddress(NULL);
    }
  }

  if (page->WasSwept()) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    ASSERT_EQ(AreaSize(), static_cast<int>(size));
  } else {
    DecreaseUnsweptFreeBytes(page);
  }

  if (Page::FromAllocationTop(allocation_info_.top) == page) {
    allocation_info_.top = allocation_info_.limit = NULL;
  }

  page->Unlink();
  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
    heap()->isolate()->memory_allocator()->Free(page);
  } else {
    heap()->QueueMemoryChunkForFree(page);
  }

  ASSERT(Capacity() > 0);
  ASSERT(Capacity() % AreaSize() == 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}


void PagedSpace::ReleaseAllUnusedPages() {
  PageIterator it(this);
  while (it.has_next()) {
    Page* page = it.next();
    if (!page->WasSwept()) {
      if (page->LiveBytes() == 0) ReleasePage(page);
    } else {
      HeapObject* obj = HeapObject::FromAddress(page->area_start());
      if (obj->IsFreeSpace() &&
          FreeSpace::cast(obj)->size() == AreaSize()) {
        // Sometimes we allocate memory from the free list but don't
        // immediately initialize it (e.g. see PagedSpace::ReserveSpace
        // called from Heap::ReserveSpace, which can cause a GC before the
        // reserved space is actually initialized).
        // Thus we can't simply assume that obj represents a valid
        // free-list node still owned by a free list.
        // Instead we verify that the page is fully covered
        // by free-list items.
        FreeList::SizeStats sizes;
        free_list_.CountFreeListItems(page, &sizes);
        if (sizes.Total() == AreaSize()) {
          ReleasePage(page);
        }
      }
    }
  }
  heap()->FreeQueuedChunks();
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // We can only iterate over the pages if they were swept precisely.
  if (was_swept_conservatively_) return;

  bool allocation_pointer_found_in_space =
      (allocation_info_.top == allocation_info_.limit);
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    ASSERT(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top)) {
      allocation_pointer_found_in_space = true;
    }
    ASSERT(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      ASSERT(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      ASSERT(map->IsMap());
      ASSERT(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      ASSERT(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    ASSERT_LE(black_size, page->LiveBytes());
  }
  ASSERT(allocation_pointer_found_in_space);
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size.  The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base =
      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
          size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  ASSERT(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_,
                  initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));

  ASSERT(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
  chunk_base_ = NULL;
  chunk_size_ = 0;
}


void NewSpace::Flip() {
  SemiSpace::Swap(&from_space_, &to_space_);
}


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  ASSERT(Capacity() < MaximumCapacity());
  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.page_high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

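// Resets the allocation top and limit to the bounds of the current to-space
// page.  While incremental marking is active the limit is lowered so that
// allocation regularly falls back to the slow path and performs a marking
// step.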
void NewSpace::UpdateAllocationInfo() {
  allocation_info_.top = to_space_.page_low();
  allocation_info_.limit = to_space_.page_high();

  // Lower limit during incremental marking.
  if (heap()->incremental_marking()->IsMarking() &&
      inline_allocation_limit_step() != 0) {
    Address new_limit =
        allocation_info_.top + inline_allocation_limit_step();
    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  to_space_.Reset();
  UpdateAllocationInfo();
  pages_used_ = 0;
  // Clear all mark-bits in the to-space.
  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

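// Advances to-space to its next page when the current page cannot satisfy an
// allocation.  The unused remainder of the current page is filled with a
// filler object so it can still be iterated.  Returns false if the current
// page is already empty or to-space has no page left.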
bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top;
  if (NewSpacePage::IsAtStart(top)) {
    // The current page is already empty. Don't try to make another.

    // We should only get here if someone asks to allocate more
    // than what can be stored in a single page.
    // TODO(gc): Change the limit on new-space allocation to prevent this
    // from happening (all such allocations should go directly to LOSpace).
    return false;
  }
  if (!to_space_.AdvancePage()) {
    // Failed to get a new page in to-space.
    return false;
  }

  // Clear remainder of current page.
  Address limit = NewSpacePage::FromLimit(top)->area_end();
  if (heap()->gc_state() == Heap::SCAVENGE) {
    heap()->promotion_queue()->SetNewLimit(limit);
    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
  }

  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page);
  pages_used_++;
  UpdateAllocationInfo();

  return true;
}

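// Slow path of AllocateRaw.  Either the inline allocation limit was lowered
// by incremental marking, in which case a marking step is performed and the
// limit is raised again, or the current page is exhausted, in which case a
// fresh page is added.  In both cases the allocation is retried; otherwise
// a retry-after-GC failure is returned.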
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top;
  Address new_top = old_top + size_in_bytes;
  Address high = to_space_.page_high();
  if (allocation_info_.limit < high) {
    // Incremental marking has lowered the limit to get a
    // chance to do a step.
    allocation_info_.limit = Min(
        allocation_info_.limit + inline_allocation_limit_step_,
        high);
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(bytes_allocated);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);
  } else if (AddFreshPage()) {
    // Switched to new page. Try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(bytes_allocated);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
  } else {
    return Failure::RetryAfterGC();
  }
}


#ifdef DEBUG
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!NewSpacePage::IsAtEnd(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsCode());

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
      // Next page should be valid.
      CHECK(!page->is_anchor());
      current = page->area_start();
    }
  }

  // Check semi-spaces.
  ASSERT_EQ(from_space_.id(), kFromSpace);
  ASSERT_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif

// -----------------------------------------------------------------------------
// SemiSpace implementation

void SemiSpace::SetUp(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  ASSERT(maximum_capacity >= Page::kPageSize);
  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  capacity_ = initial_capacity;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  committed_ = false;
  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  int pages = capacity_ / Page::kPageSize;
  Address end = start_ + maximum_capacity_;
  Address start = end - pages * Page::kPageSize;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
                                                          capacity_,
                                                          executable())) {
    return false;
  }

  NewSpacePage* page = anchor();
  for (int i = 1; i <= pages; i++) {
    NewSpacePage* new_page =
        NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
    new_page->InsertAfter(page);
    page = new_page;
  }

  committed_ = true;
  Reset();
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
    return false;
  }
  anchor()->set_next_page(anchor());
  anchor()->set_prev_page(anchor());

  committed_ = false;
  return true;
}


bool SemiSpace::GrowTo(int new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  int pages_before = capacity_ / Page::kPageSize;
  int pages_after = new_capacity / Page::kPageSize;

  Address end = start_ + maximum_capacity_;
  Address start = end - new_capacity;
  size_t delta = new_capacity - capacity_;

  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      start, delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  NewSpacePage* last_page = anchor()->prev_page();
  ASSERT(last_page != anchor());
  for (int i = pages_before + 1; i <= pages_after; i++) {
    Address page_address = end - i * Page::kPageSize;
    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
                                                      page_address,
                                                      this);
    new_page->InsertAfter(last_page);
    Bitmap::Clear(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page->GetFlags(),
                       NewSpacePage::kCopyOnFlipFlagsMask);
    last_page = new_page;
  }
  return true;
}


bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  if (is_committed()) {
    // Semispaces grow backwards from the end of their allocated capacity,
    // so we find the before and after start addresses relative to the
    // end of the space.
    Address space_end = start_ + maximum_capacity_;
    Address old_start = space_end - capacity_;
    size_t delta = capacity_ - new_capacity;
    ASSERT(IsAligned(delta, OS::AllocateAlignment()));

    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
    if (!allocator->UncommitBlock(old_start, delta)) {
      return false;
    }

    int pages_after = new_capacity / Page::kPageSize;
    NewSpacePage* new_last_page =
        NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
    new_last_page->set_next_page(anchor());
    anchor()->set_prev_page(new_last_page);
    ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
  }

  capacity_ = new_capacity;

  return true;
}

Ben Murdoch592a9fc2012-03-05 11:04:45 +00001409void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1410 anchor_.set_owner(this);
1411  // Fix up back-pointers to the anchor. The anchor's address changes
1412 // when we swap.
1413 anchor_.prev_page()->set_next_page(&anchor_);
1414 anchor_.next_page()->set_prev_page(&anchor_);
1415
1416 bool becomes_to_space = (id_ == kFromSpace);
1417 id_ = becomes_to_space ? kToSpace : kFromSpace;
1418 NewSpacePage* page = anchor_.next_page();
1419 while (page != &anchor_) {
1420 page->set_owner(this);
1421 page->SetFlags(flags, mask);
1422 if (becomes_to_space) {
1423 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1424 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1425 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1426 page->ResetLiveBytes();
1427 } else {
1428 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1429 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1430 }
1431 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1432 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1433 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1434 page = page->next_page();
1435 }
1436}
1437
1438
1439void SemiSpace::Reset() {
1440 ASSERT(anchor_.next_page() != &anchor_);
1441 current_page_ = anchor_.next_page();
1442}
1443
1444
1445void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1446 // We won't be swapping semispaces without data in them.
1447 ASSERT(from->anchor_.next_page() != &from->anchor_);
1448 ASSERT(to->anchor_.next_page() != &to->anchor_);
1449
1450 // Swap bits.
1451 SemiSpace tmp = *from;
1452 *from = *to;
1453 *to = tmp;
1454
1455 // Fixup back-pointers to the page list anchor now that its address
1456 // has changed.
1457 // Swap to/from-space bits on pages.
1458 // Copy GC flags from old active space (from-space) to new (to-space).
1459 intptr_t flags = from->current_page()->GetFlags();
1460 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1461
1462 from->FlipPages(0, 0);
1463}
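// Illustrative sketch only: a typical caller flips the two halves of the new
// space at the start of a scavenge. The member names from_space_ and
// to_space_ are assumed here for illustration and are not defined in this
// file:
//
//   SemiSpace::Swap(&from_space_, &to_space_);
//   to_space_.Reset();  // allocation resumes at the first page of to-space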
1464
1465
1466void SemiSpace::set_age_mark(Address mark) {
1467 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1468 age_mark_ = mark;
1469 // Mark all pages up to the one containing mark.
1470 NewSpacePageIterator it(space_start(), mark);
1471 while (it.has_next()) {
1472 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1473 }
1474}
1475
1476
Steve Blocka7e24c12009-10-30 11:49:00 +00001477#ifdef DEBUG
1478void SemiSpace::Print() { }
1479
1480
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001481void SemiSpace::Verify() {
1482 bool is_from_space = (id_ == kFromSpace);
1483 NewSpacePage* page = anchor_.next_page();
1484 CHECK(anchor_.semi_space() == this);
1485 while (page != &anchor_) {
1486 CHECK(page->semi_space() == this);
1487 CHECK(page->InNewSpace());
1488 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1489 : MemoryChunk::IN_TO_SPACE));
1490 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1491 : MemoryChunk::IN_FROM_SPACE));
1492 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1493 if (!is_from_space) {
1494 // The pointers-from-here-are-interesting flag isn't updated dynamically
1495 // on from-space pages, so it might be out of sync with the marking state.
1496 if (page->heap()->incremental_marking()->IsMarking()) {
1497 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1498 } else {
1499 CHECK(!page->IsFlagSet(
1500 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1501 }
1502 // TODO(gc): Check that the live_bytes_count_ field matches the
1503 // black marking on the page (if we make it match in new-space).
1504 }
1505 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1506 CHECK(page->prev_page()->next_page() == page);
1507 page = page->next_page();
1508 }
1509}
1510
1511
1512void SemiSpace::AssertValidRange(Address start, Address end) {
1513  // Both addresses must belong to the same semi-space.
1514 NewSpacePage* page = NewSpacePage::FromLimit(start);
1515 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1516 SemiSpace* space = page->semi_space();
1517 CHECK_EQ(space, end_page->semi_space());
1518  // The start address must come before the end address: either both are on
1519  // the same page, or the end address is on a later page in the linked
1520  // list of semi-space pages.
1521 if (page == end_page) {
1522 CHECK(start <= end);
1523 } else {
1524 while (page != end_page) {
1525 page = page->next_page();
1526 CHECK_NE(page, space->anchor());
1527 }
1528 }
1529}
Steve Blocka7e24c12009-10-30 11:49:00 +00001530#endif
1531
1532
1533// -----------------------------------------------------------------------------
1534// SemiSpaceIterator implementation.
1535SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001536 Initialize(space->bottom(), space->top(), NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001537}
1538
1539
1540SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1541 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001542 Initialize(space->bottom(), space->top(), size_func);
Steve Blocka7e24c12009-10-30 11:49:00 +00001543}
1544
1545
1546SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001547 Initialize(start, space->top(), NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001548}
1549
1550
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001551SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1552 Initialize(from, to, NULL);
1553}
1554
1555
1556void SemiSpaceIterator::Initialize(Address start,
Steve Blocka7e24c12009-10-30 11:49:00 +00001557 Address end,
1558 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001559 SemiSpace::AssertValidRange(start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00001560 current_ = start;
1561 limit_ = end;
1562 size_func_ = size_func;
1563}
1564
1565
1566#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00001567// heap_histograms is shared, always clear it before using it.
1568static void ClearHistograms() {
Steve Block44f0eee2011-05-26 01:26:41 +01001569 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001570 // We reset the name each time, though it hasn't changed.
Steve Block44f0eee2011-05-26 01:26:41 +01001571#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
Steve Blocka7e24c12009-10-30 11:49:00 +00001572 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1573#undef DEF_TYPE_NAME
1574
Steve Block44f0eee2011-05-26 01:26:41 +01001575#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00001576 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1577#undef CLEAR_HISTOGRAM
1578
Steve Block44f0eee2011-05-26 01:26:41 +01001579 isolate->js_spill_information()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00001580}
1581
1582
Steve Blocka7e24c12009-10-30 11:49:00 +00001583static void ClearCodeKindStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01001584 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001585 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001586 isolate->code_kind_statistics()[i] = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001587 }
1588}
1589
1590
1591static void ReportCodeKindStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01001592 Isolate* isolate = Isolate::Current();
Steve Block6ded16b2010-05-10 14:33:55 +01001593 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
Steve Blocka7e24c12009-10-30 11:49:00 +00001594
1595#define CASE(name) \
1596 case Code::name: table[Code::name] = #name; \
1597 break
1598
1599 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1600 switch (static_cast<Code::Kind>(i)) {
1601 CASE(FUNCTION);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001602 CASE(OPTIMIZED_FUNCTION);
Steve Blocka7e24c12009-10-30 11:49:00 +00001603 CASE(STUB);
1604 CASE(BUILTIN);
1605 CASE(LOAD_IC);
1606 CASE(KEYED_LOAD_IC);
1607 CASE(STORE_IC);
1608 CASE(KEYED_STORE_IC);
1609 CASE(CALL_IC);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001610 CASE(KEYED_CALL_IC);
Ben Murdoch257744e2011-11-30 15:57:28 +00001611 CASE(UNARY_OP_IC);
1612 CASE(BINARY_OP_IC);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001613 CASE(COMPARE_IC);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001614 CASE(TO_BOOLEAN_IC);
Steve Blocka7e24c12009-10-30 11:49:00 +00001615 }
1616 }
1617
1618#undef CASE
1619
1620 PrintF("\n Code kind histograms: \n");
1621 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001622 if (isolate->code_kind_statistics()[i] > 0) {
1623 PrintF(" %-20s: %10d bytes\n", table[i],
1624 isolate->code_kind_statistics()[i]);
Steve Blocka7e24c12009-10-30 11:49:00 +00001625 }
1626 }
1627 PrintF("\n");
1628}
1629
1630
1631static int CollectHistogramInfo(HeapObject* obj) {
Steve Block44f0eee2011-05-26 01:26:41 +01001632 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001633 InstanceType type = obj->map()->instance_type();
1634 ASSERT(0 <= type && type <= LAST_TYPE);
Steve Block44f0eee2011-05-26 01:26:41 +01001635 ASSERT(isolate->heap_histograms()[type].name() != NULL);
1636 isolate->heap_histograms()[type].increment_number(1);
1637 isolate->heap_histograms()[type].increment_bytes(obj->Size());
Steve Blocka7e24c12009-10-30 11:49:00 +00001638
1639 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
Steve Block44f0eee2011-05-26 01:26:41 +01001640 JSObject::cast(obj)->IncrementSpillStatistics(
1641 isolate->js_spill_information());
Steve Blocka7e24c12009-10-30 11:49:00 +00001642 }
1643
1644 return obj->Size();
1645}
1646
1647
1648static void ReportHistogram(bool print_spill) {
Steve Block44f0eee2011-05-26 01:26:41 +01001649 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001650 PrintF("\n Object Histogram:\n");
1651 for (int i = 0; i <= LAST_TYPE; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001652 if (isolate->heap_histograms()[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001653 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Block44f0eee2011-05-26 01:26:41 +01001654 isolate->heap_histograms()[i].name(),
1655 isolate->heap_histograms()[i].number(),
1656 isolate->heap_histograms()[i].bytes());
Steve Blocka7e24c12009-10-30 11:49:00 +00001657 }
1658 }
1659 PrintF("\n");
1660
1661 // Summarize string types.
1662 int string_number = 0;
1663 int string_bytes = 0;
1664#define INCREMENT(type, size, name, camel_name) \
Steve Block44f0eee2011-05-26 01:26:41 +01001665 string_number += isolate->heap_histograms()[type].number(); \
1666 string_bytes += isolate->heap_histograms()[type].bytes();
Steve Blocka7e24c12009-10-30 11:49:00 +00001667 STRING_TYPE_LIST(INCREMENT)
1668#undef INCREMENT
1669 if (string_number > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001670 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
Steve Blocka7e24c12009-10-30 11:49:00 +00001671 string_bytes);
1672 }
1673
1674 if (FLAG_collect_heap_spill_statistics && print_spill) {
Steve Block44f0eee2011-05-26 01:26:41 +01001675 isolate->js_spill_information()->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00001676 }
1677}
1678#endif // DEBUG
1679
1680
1681// Support for statistics gathering for --heap-stats and --log-gc.
Steve Blocka7e24c12009-10-30 11:49:00 +00001682void NewSpace::ClearHistograms() {
1683 for (int i = 0; i <= LAST_TYPE; i++) {
1684 allocated_histogram_[i].clear();
1685 promoted_histogram_[i].clear();
1686 }
1687}
1688
1689// Because the copying collector does not touch garbage objects, we iterate
1690// the new space before a collection to get a histogram of allocated objects.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001691 // This only happens when the --log-gc flag is set.
Steve Blocka7e24c12009-10-30 11:49:00 +00001692void NewSpace::CollectStatistics() {
1693 ClearHistograms();
1694 SemiSpaceIterator it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001695 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
Leon Clarked91b9f72010-01-27 17:25:45 +00001696 RecordAllocation(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001697}
1698
1699
Steve Block44f0eee2011-05-26 01:26:41 +01001700static void DoReportStatistics(Isolate* isolate,
1701 HistogramInfo* info, const char* description) {
1702 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
Steve Blocka7e24c12009-10-30 11:49:00 +00001703 // Lump all the string types together.
1704 int string_number = 0;
1705 int string_bytes = 0;
1706#define INCREMENT(type, size, name, camel_name) \
1707 string_number += info[type].number(); \
1708 string_bytes += info[type].bytes();
1709 STRING_TYPE_LIST(INCREMENT)
1710#undef INCREMENT
1711 if (string_number > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01001712 LOG(isolate,
1713 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00001714 }
1715
1716 // Then do the other types.
1717 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1718 if (info[i].number() > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01001719 LOG(isolate,
1720 HeapSampleItemEvent(info[i].name(), info[i].number(),
Steve Blocka7e24c12009-10-30 11:49:00 +00001721 info[i].bytes()));
1722 }
1723 }
Steve Block44f0eee2011-05-26 01:26:41 +01001724 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
Steve Blocka7e24c12009-10-30 11:49:00 +00001725}
Steve Blocka7e24c12009-10-30 11:49:00 +00001726
1727
1728void NewSpace::ReportStatistics() {
1729#ifdef DEBUG
1730 if (FLAG_heap_stats) {
1731 float pct = static_cast<float>(Available()) / Capacity();
Ben Murdochf87a2032010-10-22 12:50:53 +01001732 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1733 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001734 Capacity(), Available(), static_cast<int>(pct*100));
1735 PrintF("\n Object Histogram:\n");
1736 for (int i = 0; i <= LAST_TYPE; i++) {
1737 if (allocated_histogram_[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001738 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001739 allocated_histogram_[i].name(),
1740 allocated_histogram_[i].number(),
1741 allocated_histogram_[i].bytes());
1742 }
1743 }
1744 PrintF("\n");
1745 }
1746#endif // DEBUG
1747
Steve Blocka7e24c12009-10-30 11:49:00 +00001748 if (FLAG_log_gc) {
Steve Block44f0eee2011-05-26 01:26:41 +01001749 Isolate* isolate = ISOLATE;
1750 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1751 DoReportStatistics(isolate, promoted_histogram_, "promoted");
Steve Blocka7e24c12009-10-30 11:49:00 +00001752 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001753}
1754
1755
1756void NewSpace::RecordAllocation(HeapObject* obj) {
1757 InstanceType type = obj->map()->instance_type();
1758 ASSERT(0 <= type && type <= LAST_TYPE);
1759 allocated_histogram_[type].increment_number(1);
1760 allocated_histogram_[type].increment_bytes(obj->Size());
1761}
1762
1763
1764void NewSpace::RecordPromotion(HeapObject* obj) {
1765 InstanceType type = obj->map()->instance_type();
1766 ASSERT(0 <= type && type <= LAST_TYPE);
1767 promoted_histogram_[type].increment_number(1);
1768 promoted_histogram_[type].increment_bytes(obj->Size());
1769}
Steve Blocka7e24c12009-10-30 11:49:00 +00001770
Steve Blocka7e24c12009-10-30 11:49:00 +00001771// -----------------------------------------------------------------------------
1772// Free lists for old object spaces implementation
1773
Steve Block44f0eee2011-05-26 01:26:41 +01001774void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001775 ASSERT(size_in_bytes > 0);
1776 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1777
1778 // We write a map and possibly size information to the block. If the block
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001779 // is big enough to be a FreeSpace with at least one extra word (the next
1780 // pointer), we set its map to be the free space map and its size to an
Steve Blocka7e24c12009-10-30 11:49:00 +00001781 // appropriate array length for the desired size from HeapObject::Size().
1782  // If the block is too small (e.g., one or two words) to hold both a size
1783 // field and a next pointer, we give it a filler map that gives it the
1784 // correct size.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001785 if (size_in_bytes > FreeSpace::kHeaderSize) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00001786 set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001787 // Can't use FreeSpace::cast because it fails during deserialization.
1788 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1789 this_as_free_space->set_size(size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001790 } else if (size_in_bytes == kPointerSize) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00001791 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00001792 } else if (size_in_bytes == 2 * kPointerSize) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00001793 set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00001794 } else {
1795 UNREACHABLE();
1796 }
Steve Blockd0582a62009-12-15 09:54:21 +00001797 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001798  // deserialization because the free space map has not been set up yet.
Steve Blocka7e24c12009-10-30 11:49:00 +00001799}
1800
1801
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001802FreeListNode* FreeListNode::next() {
Steve Block3ce2e202009-11-05 08:53:23 +00001803 ASSERT(IsFreeListNode(this));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001804 if (map() == HEAP->raw_unchecked_free_space_map()) {
1805 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1806 return reinterpret_cast<FreeListNode*>(
1807 Memory::Address_at(address() + kNextOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00001808 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001809 return reinterpret_cast<FreeListNode*>(
1810 Memory::Address_at(address() + kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00001811 }
1812}
1813
1814
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001815FreeListNode** FreeListNode::next_address() {
Steve Block3ce2e202009-11-05 08:53:23 +00001816 ASSERT(IsFreeListNode(this));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001817 if (map() == HEAP->raw_unchecked_free_space_map()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001818 ASSERT(Size() >= kNextOffset + kPointerSize);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001819 return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
Steve Blocka7e24c12009-10-30 11:49:00 +00001820 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001821 return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00001822 }
1823}
1824
1825
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001826void FreeListNode::set_next(FreeListNode* next) {
1827 ASSERT(IsFreeListNode(this));
1828 // While we are booting the VM the free space map will actually be null. So
1829 // we have to make sure that we don't try to use it for anything at that
1830 // stage.
1831 if (map() == HEAP->raw_unchecked_free_space_map()) {
1832 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1833 Memory::Address_at(address() + kNextOffset) =
1834 reinterpret_cast<Address>(next);
1835 } else {
1836 Memory::Address_at(address() + kPointerSize) =
1837 reinterpret_cast<Address>(next);
1838 }
1839}
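// Layout sketch of the free-block encodings used by the accessors above. The
// exact value of kNextOffset is not shown in this file; it is assumed to sit
// just past the FreeSpace header (map word plus size word):
//
//   size > FreeSpace::kHeaderSize:  [ free_space_map | size | next | unused ]
//   size == 2 * kPointerSize:       [ two_pointer_filler_map | next ]
//   size == kPointerSize:           [ one_pointer_filler_map ]
//
// Filler-sized blocks are normally never linked into a list at all, since
// FreeList::Free below drops anything smaller than kSmallListMin on the floor.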
1840
1841
1842FreeList::FreeList(PagedSpace* owner)
1843 : owner_(owner), heap_(owner->heap()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001844 Reset();
1845}
1846
1847
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001848void FreeList::Reset() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001849 available_ = 0;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001850 small_list_ = NULL;
1851 medium_list_ = NULL;
1852 large_list_ = NULL;
1853 huge_list_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001854}
1855
1856
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001857int FreeList::Free(Address start, int size_in_bytes) {
1858 if (size_in_bytes == 0) return 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001859 FreeListNode* node = FreeListNode::FromAddress(start);
Steve Block44f0eee2011-05-26 01:26:41 +01001860 node->set_size(heap_, size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001861
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001862 // Early return to drop too-small blocks on the floor.
1863 if (size_in_bytes < kSmallListMin) return size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +00001864
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001865 // Insert other blocks at the head of a free list of the appropriate
1866 // magnitude.
1867 if (size_in_bytes <= kSmallListMax) {
1868 node->set_next(small_list_);
1869 small_list_ = node;
1870 } else if (size_in_bytes <= kMediumListMax) {
1871 node->set_next(medium_list_);
1872 medium_list_ = node;
1873 } else if (size_in_bytes <= kLargeListMax) {
1874 node->set_next(large_list_);
1875 large_list_ = node;
1876 } else {
1877 node->set_next(huge_list_);
1878 huge_list_ = node;
Steve Blocka7e24c12009-10-30 11:49:00 +00001879 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001880 available_ += size_in_bytes;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001881 ASSERT(IsVeryLong() || available_ == SumFreeLists());
Steve Blocka7e24c12009-10-30 11:49:00 +00001882 return 0;
1883}
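// The routing above amounts to a pure function of the block size. This is an
// illustrative summary only; the bucket names are descriptive, and only the
// kSmallListMin and k*ListMax constants exist in FreeList:
//
//   size <  kSmallListMin   -> dropped; its size is returned to the caller
//                              (presumably to be accounted as waste)
//   size <= kSmallListMax   -> small_list_
//   size <= kMediumListMax  -> medium_list_
//   size <= kLargeListMax   -> large_list_
//   otherwise               -> huge_list_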
1884
1885
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001886FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1887 FreeListNode* node = *list;
Steve Blocka7e24c12009-10-30 11:49:00 +00001888
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001889 if (node == NULL) return NULL;
1890
1891 while (node != NULL &&
1892 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1893 available_ -= node->Size();
1894 node = node->next();
Steve Blocka7e24c12009-10-30 11:49:00 +00001895 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001896
1897 if (node != NULL) {
1898 *node_size = node->Size();
1899 *list = node->next();
Steve Blocka7e24c12009-10-30 11:49:00 +00001900 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001901 *list = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001902 }
1903
Steve Blocka7e24c12009-10-30 11:49:00 +00001904 return node;
1905}
1906
1907
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001908FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1909 FreeListNode* node = NULL;
1910
1911 if (size_in_bytes <= kSmallAllocationMax) {
1912 node = PickNodeFromList(&small_list_, node_size);
1913 if (node != NULL) return node;
1914 }
1915
1916 if (size_in_bytes <= kMediumAllocationMax) {
1917 node = PickNodeFromList(&medium_list_, node_size);
1918 if (node != NULL) return node;
1919 }
1920
1921 if (size_in_bytes <= kLargeAllocationMax) {
1922 node = PickNodeFromList(&large_list_, node_size);
1923 if (node != NULL) return node;
1924 }
1925
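  // The huge list is not segregated by size, so walk it linearly and take the
  // first block that is large enough, unlinking evacuation-candidate blocks
  // as they are passed over.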
1926 for (FreeListNode** cur = &huge_list_;
1927 *cur != NULL;
1928 cur = (*cur)->next_address()) {
1929 FreeListNode* cur_node = *cur;
1930 while (cur_node != NULL &&
1931 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1932 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1933 cur_node = cur_node->next();
1934 }
1935
1936 *cur = cur_node;
1937 if (cur_node == NULL) break;
1938
1939 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1940 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1941 int size = cur_as_free_space->Size();
1942 if (size >= size_in_bytes) {
1943 // Large enough node found. Unlink it from the list.
1944 node = *cur;
1945 *node_size = size;
1946 *cur = node->next();
1947 break;
1948 }
1949 }
1950
1951 return node;
1952}
1953
1954
1955// Allocation on the old space free list. If it succeeds then a new linear
1956// allocation space has been set up with the top and limit of the space. If
1957// the allocation fails then NULL is returned, and the caller can perform a GC
1958// or allocate a new page before retrying.
1959HeapObject* FreeList::Allocate(int size_in_bytes) {
1960 ASSERT(0 < size_in_bytes);
1961 ASSERT(size_in_bytes <= kMaxBlockSize);
1962 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1963 // Don't free list allocate if there is linear space available.
1964 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1965
1966 int new_node_size = 0;
1967 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
1968 if (new_node == NULL) return NULL;
1969
1970 available_ -= new_node_size;
1971 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1972
1973 int bytes_left = new_node_size - size_in_bytes;
1974 ASSERT(bytes_left >= 0);
1975
1976 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
1977 // Mark the old linear allocation area with a free space map so it can be
1978 // skipped when scanning the heap. This also puts it back in the free list
1979 // if it is big enough.
1980 owner_->Free(owner_->top(), old_linear_size);
1981
1982#ifdef DEBUG
1983 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
1984 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
1985 }
1986#endif
1987
1988 owner_->heap()->incremental_marking()->OldSpaceStep(
1989 size_in_bytes - old_linear_size);
1990
1991 // The old-space-step might have finished sweeping and restarted marking.
1992 // Verify that it did not turn the page of the new node into an evacuation
1993 // candidate.
1994 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
1995
1996 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
1997
1998 // Memory in the linear allocation area is counted as allocated. We may free
1999 // a little of this again immediately - see below.
2000 owner_->Allocate(new_node_size);
2001
2002 if (bytes_left > kThreshold &&
2003 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2004 FLAG_incremental_marking_steps) {
2005 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2006 // We don't want to give too large linear areas to the allocator while
2007 // incremental marking is going on, because we won't check again whether
2008 // we want to do another increment until the linear area is used up.
2009 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2010 new_node_size - size_in_bytes - linear_size);
2011 owner_->SetTop(new_node->address() + size_in_bytes,
2012 new_node->address() + size_in_bytes + linear_size);
2013 } else if (bytes_left > 0) {
2014 // Normally we give the rest of the node to the allocator as its new
2015 // linear allocation area.
2016 owner_->SetTop(new_node->address() + size_in_bytes,
2017 new_node->address() + new_node_size);
2018 } else {
2019    // TODO(gc) Try not freeing the linear allocation region when bytes_left
2020    // is zero.
2021 owner_->SetTop(NULL, NULL);
2022 }
2023
2024 return new_node;
2025}
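// Illustrative caller sketch; it mirrors PagedSpace::ReserveSpace and
// SlowAllocateRaw later in this file rather than introducing a new API:
//
//   HeapObject* new_area = free_list_.Allocate(size_in_bytes);
//   if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
//   if (new_area == NULL) return false;  // the caller must GC or expand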
2026
2027
2028static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
2029 intptr_t sum = 0;
2030 while (n != NULL) {
2031 if (Page::FromAddress(n->address()) == p) {
2032 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
2033 sum += free_space->Size();
2034 }
2035 n = n->next();
2036 }
2037 return sum;
2038}
2039
2040
2041void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
2042 sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
2043 if (sizes->huge_size_ < p->area_size()) {
2044 sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
2045 sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
2046 sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
2047 } else {
2048 sizes->small_size_ = 0;
2049 sizes->medium_size_ = 0;
2050 sizes->large_size_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002051 }
2052}
2053
2054
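// Removes every node that lives on page p from the list *n and returns the
// total number of bytes covered by the removed nodes.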
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002055static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
2056 intptr_t sum = 0;
2057 while (*n != NULL) {
2058 if (Page::FromAddress((*n)->address()) == p) {
2059 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2060 sum += free_space->Size();
2061 *n = (*n)->next();
2062 } else {
2063 n = (*n)->next_address();
2064 }
2065 }
2066 return sum;
2067}
2068
2069
2070intptr_t FreeList::EvictFreeListItems(Page* p) {
2071 intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
2072
2073 if (sum < p->area_size()) {
2074 sum += EvictFreeListItemsInList(&small_list_, p) +
2075 EvictFreeListItemsInList(&medium_list_, p) +
2076 EvictFreeListItemsInList(&large_list_, p);
2077 }
2078
2079 available_ -= static_cast<int>(sum);
2080
2081 return sum;
2082}
2083
2084
2085#ifdef DEBUG
2086intptr_t FreeList::SumFreeList(FreeListNode* cur) {
2087 intptr_t sum = 0;
2088 while (cur != NULL) {
2089 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
2090 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2091 sum += cur_as_free_space->Size();
2092 cur = cur->next();
2093 }
2094 return sum;
2095}
2096
2097
2098static const int kVeryLongFreeList = 500;
2099
2100
2101int FreeList::FreeListLength(FreeListNode* cur) {
2102 int length = 0;
2103 while (cur != NULL) {
2104 length++;
2105 cur = cur->next();
2106 if (length == kVeryLongFreeList) return length;
2107 }
2108 return length;
2109}
2110
2111
2112bool FreeList::IsVeryLong() {
2113 if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
2114 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
2115 if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
2116 if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
2117 return false;
2118}
2119
2120
2121// This can take a very long time because it is linear in the number of entries
2122// on the free list, so it should not be called if FreeListLength returns
2123// kVeryLongFreeList.
2124intptr_t FreeList::SumFreeLists() {
2125 intptr_t sum = SumFreeList(small_list_);
2126 sum += SumFreeList(medium_list_);
2127 sum += SumFreeList(large_list_);
2128 sum += SumFreeList(huge_list_);
2129 return sum;
2130}
2131#endif
2132
2133
Steve Blocka7e24c12009-10-30 11:49:00 +00002134// -----------------------------------------------------------------------------
2135// OldSpace implementation
2136
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002137bool NewSpace::ReserveSpace(int bytes) {
2138  // We can't reliably unpack a partial snapshot that needs more new space
2139  // than the minimum NewSpace size. The limit can be set lower than
2140 // the end of new space either because there is more space on the next page
2141 // or because we have lowered the limit in order to get periodic incremental
2142 // marking. The most reliable way to ensure that there is linear space is
2143 // to do the allocation, then rewind the limit.
2144 ASSERT(bytes <= InitialCapacity());
2145 MaybeObject* maybe = AllocateRaw(bytes);
2146 Object* object = NULL;
2147 if (!maybe->ToObject(&object)) return false;
2148 HeapObject* allocation = HeapObject::cast(object);
2149 Address top = allocation_info_.top;
2150 if ((top - bytes) == allocation->address()) {
2151 allocation_info_.top = allocation->address();
2152 return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00002153 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002154 // There may be a borderline case here where the allocation succeeded, but
2155 // the limit and top have moved on to a new page. In that case we try again.
2156 return ReserveSpace(bytes);
2157}
2158
2159
2160void PagedSpace::PrepareForMarkCompact() {
2161 // We don't have a linear allocation area while sweeping. It will be restored
2162 // on the first allocation after the sweep.
2163 // Mark the old linear allocation area with a free space map so it can be
2164 // skipped when scanning the heap.
2165 int old_linear_size = static_cast<int>(limit() - top());
2166 Free(top(), old_linear_size);
2167 SetTop(NULL, NULL);
2168
2169 // Stop lazy sweeping and clear marking bits for unswept pages.
2170 if (first_unswept_page_ != NULL) {
2171 Page* p = first_unswept_page_;
2172 do {
2173 // Do not use ShouldBeSweptLazily predicate here.
2174 // New evacuation candidates were selected but they still have
2175 // to be swept before collection starts.
2176 if (!p->WasSwept()) {
2177 Bitmap::Clear(p);
2178 if (FLAG_gc_verbose) {
2179 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
2180 reinterpret_cast<intptr_t>(p));
2181 }
2182 }
2183 p = p->next_page();
2184 } while (p != anchor());
2185 }
2186 first_unswept_page_ = Page::FromAddress(NULL);
Ben Murdochc7cc0282012-03-05 14:35:55 +00002187 unswept_free_bytes_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002188
2189 // Clear the free list before a full GC---it will be rebuilt afterward.
2190 free_list_.Reset();
2191}
2192
2193
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002194bool PagedSpace::ReserveSpace(int size_in_bytes) {
2195 ASSERT(size_in_bytes <= AreaSize());
2196 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
2197 Address current_top = allocation_info_.top;
2198 Address new_top = current_top + size_in_bytes;
2199 if (new_top <= allocation_info_.limit) return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00002200
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002201 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2202 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2203 if (new_area == NULL) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002204
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002205 int old_linear_size = static_cast<int>(limit() - top());
2206  // Mark the old linear allocation area with a free space map so it can be
2207 // skipped when scanning the heap. This also puts it back in the free list
2208 // if it is big enough.
2209 Free(top(), old_linear_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002210
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002211 SetTop(new_area->address(), new_area->address() + size_in_bytes);
2212 Allocate(size_in_bytes);
Leon Clarkee46be812010-01-19 14:06:41 +00002213 return true;
2214}
2215
2216
2217// You have to call this last, since the implementation from PagedSpace
2218// doesn't know that memory was 'promised' to large object space.
2219bool LargeObjectSpace::ReserveSpace(int bytes) {
Steve Block44f0eee2011-05-26 01:26:41 +01002220 return heap()->OldGenerationSpaceAvailable() >= bytes;
Leon Clarkee46be812010-01-19 14:06:41 +00002221}
2222
2223
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002224bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2225 if (IsSweepingComplete()) return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00002226
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002227 intptr_t freed_bytes = 0;
2228 Page* p = first_unswept_page_;
2229 do {
2230 Page* next_page = p->next_page();
2231 if (ShouldBeSweptLazily(p)) {
2232 if (FLAG_gc_verbose) {
2233 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
2234 reinterpret_cast<intptr_t>(p));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002235 }
Ben Murdochc7cc0282012-03-05 14:35:55 +00002236 DecreaseUnsweptFreeBytes(p);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002237 freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
Steve Blockd0582a62009-12-15 09:54:21 +00002238 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002239 p = next_page;
2240 } while (p != anchor() && freed_bytes < bytes_to_sweep);
2241
2242 if (p == anchor()) {
2243 first_unswept_page_ = Page::FromAddress(NULL);
2244 } else {
2245 first_unswept_page_ = p;
Steve Blocka7e24c12009-10-30 11:49:00 +00002246 }
2247
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002248 heap()->LowerOldGenLimits(freed_bytes);
2249
2250 heap()->FreeQueuedChunks();
2251
2252 return IsSweepingComplete();
2253}
2254
2255
2256void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2257 if (allocation_info_.top >= allocation_info_.limit) return;
2258
2259 if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
2260 // Create filler object to keep page iterable if it was iterable.
2261 int remaining =
2262 static_cast<int>(allocation_info_.limit - allocation_info_.top);
2263 heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2264
2265 allocation_info_.top = NULL;
2266 allocation_info_.limit = NULL;
2267 }
2268}
2269
2270
2271HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2272 // Allocation in this space has failed.
2273
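  // The fallback order below is: advance the lazy sweeper and retry the free
  // list, bail out if the old-generation limit says a GC is due, expand the
  // space with a fresh page, finish sweeping everything and retry once more,
  // and finally return NULL so the caller can collect garbage.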
Ben Murdochc7cc0282012-03-05 14:35:55 +00002274  // If there are unswept pages, advance the lazy sweeper and retry the free
2275  // list allocation before falling back to a new page.
2276 if (first_unswept_page_->is_valid()) {
2277 AdvanceSweeper(size_in_bytes);
2278
2279 // Retry the free list allocation.
2280 HeapObject* object = free_list_.Allocate(size_in_bytes);
2281 if (object != NULL) return object;
2282 }
2283
Steve Blocka7e24c12009-10-30 11:49:00 +00002284 // Free list allocation failed and there is no next page. Fail if we have
2285 // hit the old generation size limit that should cause a garbage
2286 // collection.
Steve Block44f0eee2011-05-26 01:26:41 +01002287 if (!heap()->always_allocate() &&
2288 heap()->OldGenerationAllocationLimitReached()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002289 return NULL;
2290 }
2291
Ben Murdochc7cc0282012-03-05 14:35:55 +00002292  // Try to expand the space and allocate in the newly added page.
2293 if (Expand()) {
2294 return free_list_.Allocate(size_in_bytes);
2295 }
2296
2297  // As a last resort, sweep all the remaining pages to try to find space.
2298  // This may cause a pause.
2299 if (!IsSweepingComplete()) {
2300 AdvanceSweeper(kMaxInt);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002301
2302 // Retry the free list allocation.
2303 HeapObject* object = free_list_.Allocate(size_in_bytes);
2304 if (object != NULL) return object;
Steve Blocka7e24c12009-10-30 11:49:00 +00002305 }
2306
2307 // Finally, fail.
2308 return NULL;
2309}
2310
2311
Steve Blocka7e24c12009-10-30 11:49:00 +00002312#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00002313void PagedSpace::ReportCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002314 Isolate* isolate = Isolate::Current();
2315 CommentStatistic* comments_statistics =
2316 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002317 ReportCodeKindStatistics();
2318 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2319 "count (average)\"):\n");
Steve Block44f0eee2011-05-26 01:26:41 +01002320 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002321 const CommentStatistic& cs = comments_statistics[i];
2322 if (cs.size > 0) {
2323 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2324 cs.size/cs.count);
2325 }
2326 }
2327 PrintF("\n");
2328}
2329
2330
2331void PagedSpace::ResetCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002332 Isolate* isolate = Isolate::Current();
2333 CommentStatistic* comments_statistics =
2334 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002335 ClearCodeKindStatistics();
Steve Block44f0eee2011-05-26 01:26:41 +01002336 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2337 comments_statistics[i].Clear();
2338 }
2339 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2340 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2341 comments_statistics[CommentStatistic::kMaxComments].count = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002342}
2343
2344
Steve Block44f0eee2011-05-26 01:26:41 +01002345// Adds a comment to the 'comments_statistics' table. Performance is OK as
Steve Blocka7e24c12009-10-30 11:49:00 +00002346// long as 'kMaxComments' is small.
Steve Block44f0eee2011-05-26 01:26:41 +01002347static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2348 CommentStatistic* comments_statistics =
2349 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002350 // Do not count empty comments
2351 if (delta <= 0) return;
Steve Block44f0eee2011-05-26 01:26:41 +01002352 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
Steve Blocka7e24c12009-10-30 11:49:00 +00002353 // Search for a free or matching entry in 'comments_statistics': 'cs'
2354 // points to result.
Steve Block44f0eee2011-05-26 01:26:41 +01002355 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002356 if (comments_statistics[i].comment == NULL) {
2357 cs = &comments_statistics[i];
2358 cs->comment = comment;
2359 break;
2360 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2361 cs = &comments_statistics[i];
2362 break;
2363 }
2364 }
2365 // Update entry for 'comment'
2366 cs->size += delta;
2367 cs->count += 1;
2368}
2369
2370
2371// Call for each nested comment start (start marked with '[ xxx', end marked
2372// with ']'). RelocIterator 'it' must point to a comment reloc info.
Steve Block44f0eee2011-05-26 01:26:41 +01002373static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002374 ASSERT(!it->done());
2375 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2376 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2377 if (tmp[0] != '[') {
2378 // Not a nested comment; skip
2379 return;
2380 }
2381
2382 // Search for end of nested comment or a new nested comment
2383 const char* const comment_txt =
2384 reinterpret_cast<const char*>(it->rinfo()->data());
2385 const byte* prev_pc = it->rinfo()->pc();
2386 int flat_delta = 0;
2387 it->next();
2388 while (true) {
2389    // All nested comments must be terminated properly, and therefore this
2390    // loop is guaranteed to terminate.
2391 ASSERT(!it->done());
2392 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2393 const char* const txt =
2394 reinterpret_cast<const char*>(it->rinfo()->data());
Steve Blockd0582a62009-12-15 09:54:21 +00002395 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002396 if (txt[0] == ']') break; // End of nested comment
2397 // A new comment
Steve Block44f0eee2011-05-26 01:26:41 +01002398 CollectCommentStatistics(isolate, it);
Steve Blocka7e24c12009-10-30 11:49:00 +00002399 // Skip code that was covered with previous comment
2400 prev_pc = it->rinfo()->pc();
2401 }
2402 it->next();
2403 }
Steve Block44f0eee2011-05-26 01:26:41 +01002404 EnterComment(isolate, comment_txt, flat_delta);
Steve Blocka7e24c12009-10-30 11:49:00 +00002405}
2406
2407
2408// Collects code size statistics:
2409// - by code kind
2410// - by code comment
2411void PagedSpace::CollectCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002412 Isolate* isolate = heap()->isolate();
Steve Blocka7e24c12009-10-30 11:49:00 +00002413 HeapObjectIterator obj_it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002414 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002415 if (obj->IsCode()) {
2416 Code* code = Code::cast(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01002417 isolate->code_kind_statistics()[code->kind()] += code->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00002418 RelocIterator it(code);
2419 int delta = 0;
2420 const byte* prev_pc = code->instruction_start();
2421 while (!it.done()) {
2422 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
Steve Blockd0582a62009-12-15 09:54:21 +00002423 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
Steve Block44f0eee2011-05-26 01:26:41 +01002424 CollectCommentStatistics(isolate, &it);
Steve Blocka7e24c12009-10-30 11:49:00 +00002425 prev_pc = it.rinfo()->pc();
2426 }
2427 it.next();
2428 }
2429
2430 ASSERT(code->instruction_start() <= prev_pc &&
Leon Clarkeac952652010-07-15 11:15:24 +01002431 prev_pc <= code->instruction_end());
2432 delta += static_cast<int>(code->instruction_end() - prev_pc);
Steve Block44f0eee2011-05-26 01:26:41 +01002433 EnterComment(isolate, "NoComment", delta);
Steve Blocka7e24c12009-10-30 11:49:00 +00002434 }
2435 }
2436}
2437
2438
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002439void PagedSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002440 int pct = static_cast<int>(Available() * 100 / Capacity());
2441 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2442 ", waste: %" V8_PTR_PREFIX "d"
2443 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002444 Capacity(), Waste(), Available(), pct);
2445
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002446 if (was_swept_conservatively_) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00002447 ClearHistograms();
2448 HeapObjectIterator obj_it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002449 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
Leon Clarked91b9f72010-01-27 17:25:45 +00002450 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002451 ReportHistogram(true);
2452}
Steve Blocka7e24c12009-10-30 11:49:00 +00002453#endif
2454
2455// -----------------------------------------------------------------------------
2456// FixedSpace implementation
2457
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002458void FixedSpace::PrepareForMarkCompact() {
Steve Block6ded16b2010-05-10 14:33:55 +01002459 // Call prepare of the super class.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002460 PagedSpace::PrepareForMarkCompact();
Steve Block6ded16b2010-05-10 14:33:55 +01002461
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002462 // During a non-compacting collection, everything below the linear
2463 // allocation pointer except wasted top-of-page blocks is considered
2464 // allocated and we will rediscover available bytes during the
2465 // collection.
2466 accounting_stats_.AllocateBytes(free_list_.available());
Steve Blocka7e24c12009-10-30 11:49:00 +00002467
2468 // Clear the free list before a full GC---it will be rebuilt afterward.
2469 free_list_.Reset();
2470}
2471
2472
Steve Blocka7e24c12009-10-30 11:49:00 +00002473// -----------------------------------------------------------------------------
2474// MapSpace implementation
2475
Steve Blocka7e24c12009-10-30 11:49:00 +00002476#ifdef DEBUG
2477void MapSpace::VerifyObject(HeapObject* object) {
2478 // The object should be a map or a free-list node.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002479 ASSERT(object->IsMap() || object->IsFreeSpace());
Steve Blocka7e24c12009-10-30 11:49:00 +00002480}
2481#endif
2482
2483
2484// -----------------------------------------------------------------------------
2485// GlobalPropertyCellSpace implementation
2486
2487#ifdef DEBUG
2488void CellSpace::VerifyObject(HeapObject* object) {
2489 // The object should be a global object property cell or a free-list node.
2490 ASSERT(object->IsJSGlobalPropertyCell() ||
Steve Block44f0eee2011-05-26 01:26:41 +01002491 object->map() == heap()->two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002492}
2493#endif
2494
2495
2496// -----------------------------------------------------------------------------
2497// LargeObjectIterator
2498
2499LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002500 current_ = space->first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002501 size_func_ = NULL;
2502}
2503
2504
2505LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2506 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002507 current_ = space->first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002508 size_func_ = size_func;
2509}
2510
2511
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002512HeapObject* LargeObjectIterator::Next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002513 if (current_ == NULL) return NULL;
2514
Steve Blocka7e24c12009-10-30 11:49:00 +00002515 HeapObject* object = current_->GetObject();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002516 current_ = current_->next_page();
Steve Blocka7e24c12009-10-30 11:49:00 +00002517 return object;
2518}
2519
2520
2521// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002522// LargeObjectSpace
2523
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002524LargeObjectSpace::LargeObjectSpace(Heap* heap,
2525 intptr_t max_capacity,
2526 AllocationSpace id)
Steve Block44f0eee2011-05-26 01:26:41 +01002527 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002528 max_capacity_(max_capacity),
2529 first_page_(NULL),
Steve Blocka7e24c12009-10-30 11:49:00 +00002530 size_(0),
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002531 page_count_(0),
2532 objects_size_(0) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002533
2534
Ben Murdochc7cc0282012-03-05 14:35:55 +00002535bool LargeObjectSpace::SetUp() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002536 first_page_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00002537 size_ = 0;
2538 page_count_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002539 objects_size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002540 return true;
2541}
2542
2543
2544void LargeObjectSpace::TearDown() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002545 while (first_page_ != NULL) {
2546 LargePage* page = first_page_;
2547 first_page_ = first_page_->next_page();
2548 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2549
2550 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2551 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2552 space, kAllocationActionFree, page->size());
2553 heap()->isolate()->memory_allocator()->Free(page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002554 }
Ben Murdochc7cc0282012-03-05 14:35:55 +00002555 SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002556}
2557
2558
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002559MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2560 Executability executable) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002561 // Check if we want to force a GC before growing the old space further.
2562 // If so, fail the allocation.
Steve Block44f0eee2011-05-26 01:26:41 +01002563 if (!heap()->always_allocate() &&
2564 heap()->OldGenerationAllocationLimitReached()) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002565 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002566 }
2567
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002568 if (Size() + object_size > max_capacity_) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002569 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002570 }
2571
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002572 LargePage* page = heap()->isolate()->memory_allocator()->
2573 AllocateLargePage(object_size, executable, this);
2574 if (page == NULL) return Failure::RetryAfterGC(identity());
2575 ASSERT(page->area_size() >= object_size);
2576
2577 size_ += static_cast<int>(page->size());
2578 objects_size_ += object_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002579 page_count_++;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002580 page->set_next_page(first_page_);
2581 first_page_ = page;
Steve Blocka7e24c12009-10-30 11:49:00 +00002582
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002583 HeapObject* object = page->GetObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01002584
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002585#ifdef DEBUG
2586  // Make the object consistent so the heap can be verified in OldSpaceStep.
2587 reinterpret_cast<Object**>(object->address())[0] =
2588 heap()->fixed_array_map();
2589 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2590#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002591
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002592 heap()->incremental_marking()->OldSpaceStep(object_size);
2593 return object;
Steve Blocka7e24c12009-10-30 11:49:00 +00002594}
2595
2596
2597// GC support
John Reck59135872010-11-02 12:39:01 -07002598MaybeObject* LargeObjectSpace::FindObject(Address a) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002599 for (LargePage* page = first_page_;
2600 page != NULL;
2601 page = page->next_page()) {
2602 Address page_address = page->address();
2603 if (page_address <= a && a < page_address + page->size()) {
2604 return page->GetObject();
Steve Blocka7e24c12009-10-30 11:49:00 +00002605 }
2606 }
2607 return Failure::Exception();
2608}
2609
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002610
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002611LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002612 // TODO(853): Change this implementation to only find executable
2613 // chunks and use some kind of hash-based approach to speed it up.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002614 for (LargePage* chunk = first_page_;
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002615 chunk != NULL;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002616 chunk = chunk->next_page()) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002617 Address chunk_address = chunk->address();
2618 if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
2619 return chunk;
2620 }
2621 }
2622 return NULL;
2623}
2624
2625
Steve Blocka7e24c12009-10-30 11:49:00 +00002626void LargeObjectSpace::FreeUnmarkedObjects() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002627 LargePage* previous = NULL;
2628 LargePage* current = first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002629 while (current != NULL) {
2630 HeapObject* object = current->GetObject();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002631    // Can this large page contain pointers to non-trivial objects? No other
2632 // pointer object is this big.
2633 bool is_pointer_object = object->IsFixedArray();
2634 MarkBit mark_bit = Marking::MarkBitFrom(object);
2635 if (mark_bit.Get()) {
2636 mark_bit.Clear();
Ben Murdochc7cc0282012-03-05 14:35:55 +00002637 MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
Steve Blocka7e24c12009-10-30 11:49:00 +00002638 previous = current;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002639 current = current->next_page();
Steve Blocka7e24c12009-10-30 11:49:00 +00002640 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002641 LargePage* page = current;
Steve Blocka7e24c12009-10-30 11:49:00 +00002642 // Cut the chunk out from the chunk list.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002643 current = current->next_page();
Steve Blocka7e24c12009-10-30 11:49:00 +00002644 if (previous == NULL) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002645 first_page_ = current;
Steve Blocka7e24c12009-10-30 11:49:00 +00002646 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002647 previous->set_next_page(current);
Steve Blocka7e24c12009-10-30 11:49:00 +00002648 }
2649
2650 // Free the chunk.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002651 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2652 object, heap()->isolate());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002653 size_ -= static_cast<int>(page->size());
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002654 objects_size_ -= object->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00002655 page_count_--;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002656
2657 if (is_pointer_object) {
2658 heap()->QueueMemoryChunkForFree(page);
2659 } else {
2660 heap()->isolate()->memory_allocator()->Free(page);
2661 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002662 }
2663 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002664 heap()->FreeQueuedChunks();
Steve Blocka7e24c12009-10-30 11:49:00 +00002665}
2666
2667
2668bool LargeObjectSpace::Contains(HeapObject* object) {
2669 Address address = object->address();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002670 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
Steve Blocka7e24c12009-10-30 11:49:00 +00002671
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002672 bool owned = (chunk->owner() == this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002673
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002674 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
2675
2676 return owned;
Steve Blocka7e24c12009-10-30 11:49:00 +00002677}
2678
2679
#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(heap()->Contains(element_object));
          ASSERT(element_object->map()->IsMap());
        }
      }
    }
  }
}


void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print();
  }
}


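// Reports the total size of the space, the number of objects it holds and a
// per-type allocation histogram.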
void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}


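// Adds the size of every Code object in this space to the isolate's
// per-code-kind statistics.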
void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}


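// Lists every object on the page, prefixing objects that are currently
// marked with '!', and compares the summed size of marked objects with the
// page's live-bytes counter.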
void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
  PrintF(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  PrintF(" --------------------------------------\n");
  PrintF(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG

} }  // namespace v8::internal