blob: 1ee33598c0eb54c4097f29f9fde84acc65ff63a5 [file] [log] [blame]
Ben Murdoch257744e2011-11-30 15:57:28 +00001// Copyright 2011 the V8 project authors. All rights reserved.
Steve Blocka7e24c12009-10-30 11:49:00 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
Steve Block1e0659c2011-05-24 12:43:12 +010030#include "liveobjectlist-inl.h"
Steve Blocka7e24c12009-10-30 11:49:00 +000031#include "macro-assembler.h"
32#include "mark-compact.h"
33#include "platform.h"
34
35namespace v8 {
36namespace internal {
37
Steve Blocka7e24c12009-10-30 11:49:00 +000038
Steve Blocka7e24c12009-10-30 11:49:00 +000039// ----------------------------------------------------------------------------
40// HeapObjectIterator
41
42HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +000043 // You can't actually iterate over the anchor page. It is not a real page,
44 // just an anchor for the double linked page list. Initialize as if we have
45 // reached the end of the anchor page, then the first iteration will move on
46 // to the first page.
47 Initialize(space,
48 NULL,
49 NULL,
50 kAllPagesInSpace,
51 NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +000052}
53
54
55HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
56 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +000057 // You can't actually iterate over the anchor page. It is not a real page,
58 // just an anchor for the double linked page list. Initialize the current
59 // address and end as NULL, then the first iteration will move on
60 // to the first page.
61 Initialize(space,
62 NULL,
63 NULL,
64 kAllPagesInSpace,
65 size_func);
Steve Blocka7e24c12009-10-30 11:49:00 +000066}
67
68
Kristian Monsen80d68ea2010-09-08 11:05:35 +010069HeapObjectIterator::HeapObjectIterator(Page* page,
70 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +000071 Space* owner = page->owner();
72 ASSERT(owner == HEAP->old_pointer_space() ||
73 owner == HEAP->old_data_space() ||
74 owner == HEAP->map_space() ||
75 owner == HEAP->cell_space() ||
76 owner == HEAP->code_space());
77 Initialize(reinterpret_cast<PagedSpace*>(owner),
78 page->area_start(),
79 page->area_end(),
80 kOnePageOnly,
81 size_func);
82 ASSERT(page->WasSweptPrecisely());
Kristian Monsen80d68ea2010-09-08 11:05:35 +010083}
84
85
Ben Murdoch592a9fc2012-03-05 11:04:45 +000086void HeapObjectIterator::Initialize(PagedSpace* space,
87 Address cur, Address end,
88 HeapObjectIterator::PageMode mode,
Steve Blocka7e24c12009-10-30 11:49:00 +000089 HeapObjectCallback size_f) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +000090 // Check that we actually can iterate this space.
91 ASSERT(!space->was_swept_conservatively());
92
93 space_ = space;
Steve Blocka7e24c12009-10-30 11:49:00 +000094 cur_addr_ = cur;
Ben Murdoch592a9fc2012-03-05 11:04:45 +000095 cur_end_ = end;
96 page_mode_ = mode;
Steve Blocka7e24c12009-10-30 11:49:00 +000097 size_func_ = size_f;
Steve Blocka7e24c12009-10-30 11:49:00 +000098}
99
100
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000101// We have hit the end of the page and should advance to the next block of
102// objects. This happens at the end of the page.
103bool HeapObjectIterator::AdvanceToNextPage() {
104 ASSERT(cur_addr_ == cur_end_);
105 if (page_mode_ == kOnePageOnly) return false;
106 Page* cur_page;
107 if (cur_addr_ == NULL) {
108 cur_page = space_->anchor();
109 } else {
110 cur_page = Page::FromAddress(cur_addr_ - 1);
111 ASSERT(cur_addr_ == cur_page->area_end());
Steve Blocka7e24c12009-10-30 11:49:00 +0000112 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000113 cur_page = cur_page->next_page();
114 if (cur_page == space_->anchor()) return false;
115 cur_addr_ = cur_page->area_start();
116 cur_end_ = cur_page->area_end();
117 ASSERT(cur_page->WasSweptPrecisely());
118 return true;
Steve Blocka7e24c12009-10-30 11:49:00 +0000119}
120
121
122// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +0000123// CodeRange
124
Steve Block44f0eee2011-05-26 01:26:41 +0100125
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000126CodeRange::CodeRange(Isolate* isolate)
127 : isolate_(isolate),
128 code_range_(NULL),
Steve Block44f0eee2011-05-26 01:26:41 +0100129 free_list_(0),
130 allocation_list_(0),
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000131 current_allocation_block_index_(0) {
Steve Block44f0eee2011-05-26 01:26:41 +0100132}
Steve Blocka7e24c12009-10-30 11:49:00 +0000133
134
135bool CodeRange::Setup(const size_t requested) {
136 ASSERT(code_range_ == NULL);
137
138 code_range_ = new VirtualMemory(requested);
139 CHECK(code_range_ != NULL);
140 if (!code_range_->IsReserved()) {
141 delete code_range_;
142 code_range_ = NULL;
143 return false;
144 }
145
146 // We are sure that we have mapped a block of requested addresses.
147 ASSERT(code_range_->size() == requested);
Steve Block44f0eee2011-05-26 01:26:41 +0100148 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000149 Address base = reinterpret_cast<Address>(code_range_->address());
150 Address aligned_base =
151 RoundUp(reinterpret_cast<Address>(code_range_->address()),
152 MemoryChunk::kAlignment);
153 size_t size = code_range_->size() - (aligned_base - base);
154 allocation_list_.Add(FreeBlock(aligned_base, size));
Steve Blocka7e24c12009-10-30 11:49:00 +0000155 current_allocation_block_index_ = 0;
156 return true;
157}
158
159
160int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
161 const FreeBlock* right) {
162 // The entire point of CodeRange is that the difference between two
163 // addresses in the range can be represented as a signed 32-bit int,
164 // so the cast is semantically correct.
165 return static_cast<int>(left->start - right->start);
166}
167
168
169void CodeRange::GetNextAllocationBlock(size_t requested) {
170 for (current_allocation_block_index_++;
171 current_allocation_block_index_ < allocation_list_.length();
172 current_allocation_block_index_++) {
173 if (requested <= allocation_list_[current_allocation_block_index_].size) {
174 return; // Found a large enough allocation block.
175 }
176 }
177
178 // Sort and merge the free blocks on the free list and the allocation list.
179 free_list_.AddAll(allocation_list_);
180 allocation_list_.Clear();
181 free_list_.Sort(&CompareFreeBlockAddress);
182 for (int i = 0; i < free_list_.length();) {
183 FreeBlock merged = free_list_[i];
184 i++;
185 // Add adjacent free blocks to the current merged block.
186 while (i < free_list_.length() &&
187 free_list_[i].start == merged.start + merged.size) {
188 merged.size += free_list_[i].size;
189 i++;
190 }
191 if (merged.size > 0) {
192 allocation_list_.Add(merged);
193 }
194 }
195 free_list_.Clear();
196
197 for (current_allocation_block_index_ = 0;
198 current_allocation_block_index_ < allocation_list_.length();
199 current_allocation_block_index_++) {
200 if (requested <= allocation_list_[current_allocation_block_index_].size) {
201 return; // Found a large enough allocation block.
202 }
203 }
204
205 // Code range is full or too fragmented.
206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
207}
208
209
210
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000211Address CodeRange::AllocateRawMemory(const size_t requested,
212 size_t* allocated) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000213 ASSERT(current_allocation_block_index_ < allocation_list_.length());
214 if (requested > allocation_list_[current_allocation_block_index_].size) {
215 // Find an allocation block large enough. This function call may
216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
217 GetNextAllocationBlock(requested);
218 }
219 // Commit the requested memory at the start of the current allocation block.
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
Steve Blocka7e24c12009-10-30 11:49:00 +0000221 FreeBlock current = allocation_list_[current_allocation_block_index_];
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000222 if (aligned_requested >= (current.size - Page::kPageSize)) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000223 // Don't leave a small free block, useless for a large object or chunk.
224 *allocated = current.size;
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000225 } else {
226 *allocated = aligned_requested;
Steve Blocka7e24c12009-10-30 11:49:00 +0000227 }
228 ASSERT(*allocated <= current.size);
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
230 if (!MemoryAllocator::CommitCodePage(code_range_,
231 current.start,
232 *allocated)) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000233 *allocated = 0;
234 return NULL;
235 }
236 allocation_list_[current_allocation_block_index_].start += *allocated;
237 allocation_list_[current_allocation_block_index_].size -= *allocated;
238 if (*allocated == current.size) {
239 GetNextAllocationBlock(0); // This block is used up, get the next one.
240 }
241 return current.start;
242}
243
244
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000245void CodeRange::FreeRawMemory(Address address, size_t length) {
246 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
Steve Blocka7e24c12009-10-30 11:49:00 +0000247 free_list_.Add(FreeBlock(address, length));
248 code_range_->Uncommit(address, length);
249}
250
251
252void CodeRange::TearDown() {
253 delete code_range_; // Frees all memory in the virtual memory range.
254 code_range_ = NULL;
255 free_list_.Free();
256 allocation_list_.Free();
257}
258
259
260// -----------------------------------------------------------------------------
261// MemoryAllocator
262//
Steve Blocka7e24c12009-10-30 11:49:00 +0000263
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000264MemoryAllocator::MemoryAllocator(Isolate* isolate)
265 : isolate_(isolate),
266 capacity_(0),
Steve Block44f0eee2011-05-26 01:26:41 +0100267 capacity_executable_(0),
268 size_(0),
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000269 size_executable_(0) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000270}
271
272
Russell Brenner90bac252010-11-18 13:33:46 -0800273bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000274 capacity_ = RoundUp(capacity, Page::kPageSize);
Russell Brenner90bac252010-11-18 13:33:46 -0800275 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
276 ASSERT_GE(capacity_, capacity_executable_);
Steve Blocka7e24c12009-10-30 11:49:00 +0000277
Steve Blocka7e24c12009-10-30 11:49:00 +0000278 size_ = 0;
Steve Block791712a2010-08-27 10:21:07 +0100279 size_executable_ = 0;
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000280
Steve Blocka7e24c12009-10-30 11:49:00 +0000281 return true;
282}
283
284
285void MemoryAllocator::TearDown() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000286 // Check that spaces were torn down before MemoryAllocator.
287 ASSERT(size_ == 0);
288 // TODO(gc) this will be true again when we fix FreeMemory.
289 // ASSERT(size_executable_ == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +0000290 capacity_ = 0;
Russell Brenner90bac252010-11-18 13:33:46 -0800291 capacity_executable_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000292}
293
294
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000295void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
296 Executability executable) {
297 // TODO(gc) make code_range part of memory allocator?
298 ASSERT(reservation->IsReserved());
299 size_t size = reservation->size();
300 ASSERT(size_ >= size);
301 size_ -= size;
302
303 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
304
305 if (executable == EXECUTABLE) {
306 ASSERT(size_executable_ >= size);
307 size_executable_ -= size;
308 }
309 // Code which is part of the code-range does not have its own VirtualMemory.
310 ASSERT(!isolate_->code_range()->contains(
311 static_cast<Address>(reservation->address())));
312 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
313 reservation->Release();
314}
315
316
317void MemoryAllocator::FreeMemory(Address base,
318 size_t size,
319 Executability executable) {
320 // TODO(gc) make code_range part of memory allocator?
321 ASSERT(size_ >= size);
322 size_ -= size;
323
324 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
325
326 if (executable == EXECUTABLE) {
327 ASSERT(size_executable_ >= size);
328 size_executable_ -= size;
329 }
330 if (isolate_->code_range()->contains(static_cast<Address>(base))) {
331 ASSERT(executable == EXECUTABLE);
332 isolate_->code_range()->FreeRawMemory(base, size);
333 } else {
334 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
335 bool result = VirtualMemory::ReleaseRegion(base, size);
336 USE(result);
337 ASSERT(result);
338 }
339}
340
341
342Address MemoryAllocator::ReserveAlignedMemory(size_t size,
343 size_t alignment,
344 VirtualMemory* controller) {
345 VirtualMemory reservation(size, alignment);
346
347 if (!reservation.IsReserved()) return NULL;
348 size_ += reservation.size();
349 Address base = RoundUp(static_cast<Address>(reservation.address()),
350 alignment);
351 controller->TakeControl(&reservation);
352 return base;
353}
354
355
356Address MemoryAllocator::AllocateAlignedMemory(size_t size,
357 size_t alignment,
358 Executability executable,
359 VirtualMemory* controller) {
360 VirtualMemory reservation;
361 Address base = ReserveAlignedMemory(size, alignment, &reservation);
362 if (base == NULL) return NULL;
363
364 if (executable == EXECUTABLE) {
365 CommitCodePage(&reservation, base, size);
366 } else {
367 if (!reservation.Commit(base,
368 size,
369 executable == EXECUTABLE)) {
370 return NULL;
371 }
Kristian Monsen50ef84f2010-07-29 15:18:00 +0100372 }
Russell Brenner90bac252010-11-18 13:33:46 -0800373
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000374 controller->TakeControl(&reservation);
375 return base;
376}
377
378
379void Page::InitializeAsAnchor(PagedSpace* owner) {
380 set_owner(owner);
381 set_prev_page(this);
382 set_next_page(this);
383}
384
385
386NewSpacePage* NewSpacePage::Initialize(Heap* heap,
387 Address start,
388 SemiSpace* semi_space) {
389 Address area_start = start + NewSpacePage::kObjectStartOffset;
390 Address area_end = start + Page::kPageSize;
391
392 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
393 start,
394 Page::kPageSize,
395 area_start,
396 area_end,
397 NOT_EXECUTABLE,
398 semi_space);
399 chunk->set_next_chunk(NULL);
400 chunk->set_prev_chunk(NULL);
401 chunk->initialize_scan_on_scavenge(true);
402 bool in_to_space = (semi_space->id() != kFromSpace);
403 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
404 : MemoryChunk::IN_FROM_SPACE);
405 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
406 : MemoryChunk::IN_TO_SPACE));
407 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
408 heap->incremental_marking()->SetNewSpacePageFlags(page);
409 return page;
410}
411
412
413void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
414 set_owner(semi_space);
415 set_next_chunk(this);
416 set_prev_chunk(this);
417 // Flags marks this invalid page as not being in new-space.
418 // All real new-space pages will be in new-space.
419 SetFlags(0, ~0);
420}
421
422
423MemoryChunk* MemoryChunk::Initialize(Heap* heap,
424 Address base,
425 size_t size,
426 Address area_start,
427 Address area_end,
428 Executability executable,
429 Space* owner) {
430 MemoryChunk* chunk = FromAddress(base);
431
432 ASSERT(base == chunk->address());
433
434 chunk->heap_ = heap;
435 chunk->size_ = size;
436 chunk->area_start_ = area_start;
437 chunk->area_end_ = area_end;
438 chunk->flags_ = 0;
439 chunk->set_owner(owner);
440 chunk->InitializeReservedMemory();
441 chunk->slots_buffer_ = NULL;
442 chunk->skip_list_ = NULL;
443 chunk->ResetLiveBytes();
444 Bitmap::Clear(chunk);
445 chunk->initialize_scan_on_scavenge(false);
446 chunk->SetFlag(WAS_SWEPT_PRECISELY);
447
448 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
449 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
450
Russell Brenner90bac252010-11-18 13:33:46 -0800451 if (executable == EXECUTABLE) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000452 chunk->SetFlag(IS_EXECUTABLE);
453 }
454
455 if (owner == heap->old_data_space()) {
456 chunk->SetFlag(CONTAINS_ONLY_DATA);
457 }
458
459 return chunk;
460}
461
462
463void MemoryChunk::InsertAfter(MemoryChunk* other) {
464 next_chunk_ = other->next_chunk_;
465 prev_chunk_ = other;
466 other->next_chunk_->prev_chunk_ = this;
467 other->next_chunk_ = this;
468}
469
470
471void MemoryChunk::Unlink() {
472 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
473 heap_->decrement_scan_on_scavenge_pages();
474 ClearFlag(SCAN_ON_SCAVENGE);
475 }
476 next_chunk_->prev_chunk_ = prev_chunk_;
477 prev_chunk_->next_chunk_ = next_chunk_;
478 prev_chunk_ = NULL;
479 next_chunk_ = NULL;
480}
481
482
483MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
484 Executability executable,
485 Space* owner) {
486 size_t chunk_size;
487 Heap* heap = isolate_->heap();
488 Address base = NULL;
489 VirtualMemory reservation;
490 Address area_start = NULL;
491 Address area_end = NULL;
492 if (executable == EXECUTABLE) {
493 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
494 OS::CommitPageSize()) + CodePageGuardSize();
495
Russell Brenner90bac252010-11-18 13:33:46 -0800496 // Check executable memory limit.
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000497 if (size_executable_ + chunk_size > capacity_executable_) {
Steve Block44f0eee2011-05-26 01:26:41 +0100498 LOG(isolate_,
499 StringEvent("MemoryAllocator::AllocateRawMemory",
Russell Brenner90bac252010-11-18 13:33:46 -0800500 "V8 Executable Allocation capacity exceeded"));
501 return NULL;
502 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000503
Russell Brenner90bac252010-11-18 13:33:46 -0800504 // Allocate executable memory either from code range or from the
505 // OS.
Steve Block44f0eee2011-05-26 01:26:41 +0100506 if (isolate_->code_range()->exists()) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000507 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
508 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
509 MemoryChunk::kAlignment));
510 if (base == NULL) return NULL;
511 size_ += chunk_size;
512 // Update executable memory size.
513 size_executable_ += chunk_size;
Russell Brenner90bac252010-11-18 13:33:46 -0800514 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000515 base = AllocateAlignedMemory(chunk_size,
516 MemoryChunk::kAlignment,
517 executable,
518 &reservation);
519 if (base == NULL) return NULL;
520 // Update executable memory size.
521 size_executable_ += reservation.size();
Russell Brenner90bac252010-11-18 13:33:46 -0800522 }
Steve Block791712a2010-08-27 10:21:07 +0100523
Leon Clarke4515c472010-02-03 11:58:03 +0000524#ifdef DEBUG
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000525 ZapBlock(base, CodePageGuardStartOffset());
526 ZapBlock(base + CodePageAreaStartOffset(), body_size);
Leon Clarke4515c472010-02-03 11:58:03 +0000527#endif
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000528 area_start = base + CodePageAreaStartOffset();
529 area_end = area_start + body_size;
530 } else {
531 chunk_size = MemoryChunk::kObjectStartOffset + body_size;
532 base = AllocateAlignedMemory(chunk_size,
533 MemoryChunk::kAlignment,
534 executable,
535 &reservation);
536
537 if (base == NULL) return NULL;
538
539#ifdef DEBUG
540 ZapBlock(base, chunk_size);
541#endif
542
543 area_start = base + Page::kObjectStartOffset;
544 area_end = base + chunk_size;
545 }
546
547 isolate_->counters()->memory_allocated()->
548 Increment(static_cast<int>(chunk_size));
549
550 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
551 if (owner != NULL) {
552 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
553 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
554 }
555
556 MemoryChunk* result = MemoryChunk::Initialize(heap,
557 base,
558 chunk_size,
559 area_start,
560 area_end,
561 executable,
562 owner);
563 result->set_reserved_memory(&reservation);
564 return result;
Steve Blocka7e24c12009-10-30 11:49:00 +0000565}
566
567
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000568Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
Steve Block791712a2010-08-27 10:21:07 +0100569 Executability executable) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000570 MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
571 executable,
572 owner);
Iain Merrick9ac36c92010-09-13 15:29:50 +0100573
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000574 if (chunk == NULL) return NULL;
575
576 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
577}
578
579
580LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
581 Executability executable,
582 Space* owner) {
583 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
584 if (chunk == NULL) return NULL;
585 return LargePage::Initialize(isolate_->heap(), chunk);
586}
587
588
589void MemoryAllocator::Free(MemoryChunk* chunk) {
590 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
591 if (chunk->owner() != NULL) {
592 ObjectSpace space =
593 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
594 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
595 }
596
597 delete chunk->slots_buffer();
598 delete chunk->skip_list();
599
600 VirtualMemory* reservation = chunk->reserved_memory();
601 if (reservation->IsReserved()) {
602 FreeMemory(reservation, chunk->executable());
603 } else {
604 FreeMemory(chunk->address(),
605 chunk->size(),
606 chunk->executable());
607 }
608}
609
610
611bool MemoryAllocator::CommitBlock(Address start,
612 size_t size,
613 Executability executable) {
614 if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
615#ifdef DEBUG
616 ZapBlock(start, size);
617#endif
618 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
619 return true;
620}
621
622
623bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
624 if (!VirtualMemory::UncommitRegion(start, size)) return false;
625 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
626 return true;
627}
628
629
630void MemoryAllocator::ZapBlock(Address start, size_t size) {
631 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
632 Memory::Address_at(start + s) = kZapValue;
633 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000634}
635
636
Iain Merrick9ac36c92010-09-13 15:29:50 +0100637void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
638 AllocationAction action,
639 size_t size) {
640 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
641 MemoryAllocationCallbackRegistration registration =
642 memory_allocation_callbacks_[i];
643 if ((registration.space & space) == space &&
644 (registration.action & action) == action)
645 registration.callback(space, action, static_cast<int>(size));
646 }
647}
648
649
650bool MemoryAllocator::MemoryAllocationCallbackRegistered(
651 MemoryAllocationCallback callback) {
652 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
653 if (memory_allocation_callbacks_[i].callback == callback) return true;
654 }
655 return false;
656}
657
658
659void MemoryAllocator::AddMemoryAllocationCallback(
660 MemoryAllocationCallback callback,
661 ObjectSpace space,
662 AllocationAction action) {
663 ASSERT(callback != NULL);
664 MemoryAllocationCallbackRegistration registration(callback, space, action);
665 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
666 return memory_allocation_callbacks_.Add(registration);
667}
668
669
670void MemoryAllocator::RemoveMemoryAllocationCallback(
671 MemoryAllocationCallback callback) {
672 ASSERT(callback != NULL);
673 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
674 if (memory_allocation_callbacks_[i].callback == callback) {
675 memory_allocation_callbacks_.Remove(i);
676 return;
677 }
678 }
679 UNREACHABLE();
680}
681
Steve Blocka7e24c12009-10-30 11:49:00 +0000682
683#ifdef DEBUG
684void MemoryAllocator::ReportStatistics() {
685 float pct = static_cast<float>(capacity_ - size_) / capacity_;
Ben Murdochf87a2032010-10-22 12:50:53 +0100686 PrintF(" capacity: %" V8_PTR_PREFIX "d"
687 ", used: %" V8_PTR_PREFIX "d"
688 ", available: %%%d\n\n",
Steve Blocka7e24c12009-10-30 11:49:00 +0000689 capacity_, size_, static_cast<int>(pct*100));
690}
691#endif
692
693
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000694int MemoryAllocator::CodePageGuardStartOffset() {
695 // We are guarding code pages: the first OS page after the header
696 // will be protected as non-writable.
697 return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
Steve Block6ded16b2010-05-10 14:33:55 +0100698}
699
700
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000701int MemoryAllocator::CodePageGuardSize() {
702 return OS::CommitPageSize();
703}
Steve Block6ded16b2010-05-10 14:33:55 +0100704
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000705
706int MemoryAllocator::CodePageAreaStartOffset() {
707 // We are guarding code pages: the first OS page after the header
708 // will be protected as non-writable.
709 return CodePageGuardStartOffset() + CodePageGuardSize();
710}
711
712
713int MemoryAllocator::CodePageAreaEndOffset() {
714 // We are guarding code pages: the last OS page will be protected as
715 // non-writable.
716 return Page::kPageSize - OS::CommitPageSize();
717}
718
719
720bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
721 Address start,
722 size_t size) {
723 // Commit page header (not executable).
724 if (!vm->Commit(start,
725 CodePageGuardStartOffset(),
726 false)) {
727 return false;
Steve Block6ded16b2010-05-10 14:33:55 +0100728 }
729
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000730 // Create guard page after the header.
731 if (!vm->Guard(start + CodePageGuardStartOffset())) {
732 return false;
Steve Block6ded16b2010-05-10 14:33:55 +0100733 }
734
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000735 // Commit page body (executable).
736 size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
737 if (!vm->Commit(start + CodePageAreaStartOffset(),
738 area_size,
739 true)) {
740 return false;
Steve Block6ded16b2010-05-10 14:33:55 +0100741 }
742
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000743 // Create guard page after the allocatable area.
744 if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
745 return false;
746 }
747
748 return true;
Steve Block6ded16b2010-05-10 14:33:55 +0100749}
750
751
Steve Blocka7e24c12009-10-30 11:49:00 +0000752// -----------------------------------------------------------------------------
753// PagedSpace implementation
754
Steve Block44f0eee2011-05-26 01:26:41 +0100755PagedSpace::PagedSpace(Heap* heap,
756 intptr_t max_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +0000757 AllocationSpace id,
758 Executability executable)
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000759 : Space(heap, id, executable),
760 free_list_(this),
761 was_swept_conservatively_(false),
762 first_unswept_page_(Page::FromAddress(NULL)) {
763 if (id == CODE_SPACE) {
764 area_size_ = heap->isolate()->memory_allocator()->
765 CodePageAreaSize();
766 } else {
767 area_size_ = Page::kPageSize - Page::kObjectStartOffset;
768 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000769 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000770 * AreaSize();
Steve Blocka7e24c12009-10-30 11:49:00 +0000771 accounting_stats_.Clear();
772
773 allocation_info_.top = NULL;
774 allocation_info_.limit = NULL;
775
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000776 anchor_.InitializeAsAnchor(this);
Steve Blocka7e24c12009-10-30 11:49:00 +0000777}
778
779
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000780bool PagedSpace::Setup() {
Steve Blocka7e24c12009-10-30 11:49:00 +0000781 return true;
782}
783
784
785bool PagedSpace::HasBeenSetup() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000786 return true;
Steve Blocka7e24c12009-10-30 11:49:00 +0000787}
788
789
790void PagedSpace::TearDown() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000791 PageIterator iterator(this);
792 while (iterator.has_next()) {
793 heap()->isolate()->memory_allocator()->Free(iterator.next());
794 }
795 anchor_.set_next_page(&anchor_);
796 anchor_.set_prev_page(&anchor_);
Steve Blocka7e24c12009-10-30 11:49:00 +0000797 accounting_stats_.Clear();
798}
799
800
John Reck59135872010-11-02 12:39:01 -0700801MaybeObject* PagedSpace::FindObject(Address addr) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000802 // Note: this function can only be called on precisely swept spaces.
Steve Block44f0eee2011-05-26 01:26:41 +0100803 ASSERT(!heap()->mark_compact_collector()->in_use());
Steve Blocka7e24c12009-10-30 11:49:00 +0000804
805 if (!Contains(addr)) return Failure::Exception();
806
807 Page* p = Page::FromAddress(addr);
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000808 HeapObjectIterator it(p, NULL);
809 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
810 Address cur = obj->address();
Steve Blocka7e24c12009-10-30 11:49:00 +0000811 Address next = cur + obj->Size();
812 if ((cur <= addr) && (addr < next)) return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +0000813 }
814
815 UNREACHABLE();
816 return Failure::Exception();
817}
818
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000819bool PagedSpace::CanExpand() {
820 ASSERT(max_capacity_ % AreaSize() == 0);
821 ASSERT(Capacity() % AreaSize() == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +0000822
823 if (Capacity() == max_capacity_) return false;
824
825 ASSERT(Capacity() < max_capacity_);
Steve Blocka7e24c12009-10-30 11:49:00 +0000826
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000827 // Are we going to exceed capacity for this space?
828 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +0000829
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000830 return true;
831}
Steve Blocka7e24c12009-10-30 11:49:00 +0000832
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000833bool PagedSpace::Expand() {
834 if (!CanExpand()) return false;
835
836 Page* p = heap()->isolate()->memory_allocator()->
837 AllocatePage(this, executable());
838 if (p == NULL) return false;
839
Steve Blocka7e24c12009-10-30 11:49:00 +0000840 ASSERT(Capacity() <= max_capacity_);
841
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000842 p->InsertAfter(anchor_.prev_page());
Steve Blocka7e24c12009-10-30 11:49:00 +0000843
844 return true;
845}
846
847
Steve Blocka7e24c12009-10-30 11:49:00 +0000848int PagedSpace::CountTotalPages() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000849 PageIterator it(this);
Steve Blocka7e24c12009-10-30 11:49:00 +0000850 int count = 0;
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000851 while (it.has_next()) {
852 it.next();
Steve Blocka7e24c12009-10-30 11:49:00 +0000853 count++;
854 }
855 return count;
856}
Steve Blocka7e24c12009-10-30 11:49:00 +0000857
858
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000859void PagedSpace::ReleasePage(Page* page) {
860 ASSERT(page->LiveBytes() == 0);
861 ASSERT(AreaSize() == page->area_size());
862
863 // Adjust list of unswept pages if the page is the head of the list.
864 if (first_unswept_page_ == page) {
865 first_unswept_page_ = page->next_page();
866 if (first_unswept_page_ == anchor()) {
867 first_unswept_page_ = Page::FromAddress(NULL);
868 }
Steve Block6ded16b2010-05-10 14:33:55 +0100869 }
870
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000871 if (page->WasSwept()) {
872 intptr_t size = free_list_.EvictFreeListItems(page);
873 accounting_stats_.AllocateBytes(size);
874 ASSERT_EQ(AreaSize(), static_cast<int>(size));
Steve Blocka7e24c12009-10-30 11:49:00 +0000875 }
876
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000877 if (Page::FromAllocationTop(allocation_info_.top) == page) {
878 allocation_info_.top = allocation_info_.limit = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +0000879 }
880
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000881 page->Unlink();
882 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
883 heap()->isolate()->memory_allocator()->Free(page);
884 } else {
885 heap()->QueueMemoryChunkForFree(page);
886 }
887
888 ASSERT(Capacity() > 0);
889 ASSERT(Capacity() % AreaSize() == 0);
890 accounting_stats_.ShrinkSpace(AreaSize());
Steve Blocka7e24c12009-10-30 11:49:00 +0000891}
892
893
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000894void PagedSpace::ReleaseAllUnusedPages() {
895 PageIterator it(this);
896 while (it.has_next()) {
897 Page* page = it.next();
898 if (!page->WasSwept()) {
899 if (page->LiveBytes() == 0) ReleasePage(page);
900 } else {
901 HeapObject* obj = HeapObject::FromAddress(page->area_start());
902 if (obj->IsFreeSpace() &&
903 FreeSpace::cast(obj)->size() == AreaSize()) {
904 // Sometimes we allocate memory from free list but don't
905 // immediately initialize it (e.g. see PagedSpace::ReserveSpace
906 // called from Heap::ReserveSpace that can cause GC before
907 // reserved space is actually initialized).
908 // Thus we can't simply assume that obj represents a valid
909 // node still owned by a free list
910 // Instead we should verify that the page is fully covered
911 // by free list items.
912 FreeList::SizeStats sizes;
913 free_list_.CountFreeListItems(page, &sizes);
914 if (sizes.Total() == AreaSize()) {
915 ReleasePage(page);
916 }
917 }
918 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000919 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000920 heap()->FreeQueuedChunks();
Steve Blocka7e24c12009-10-30 11:49:00 +0000921}
922
923
924#ifdef DEBUG
925void PagedSpace::Print() { }
926#endif
927
928
929#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +0000930void PagedSpace::Verify(ObjectVisitor* visitor) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000931 // We can only iterate over the pages if they were swept precisely.
932 if (was_swept_conservatively_) return;
Steve Blocka7e24c12009-10-30 11:49:00 +0000933
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000934 bool allocation_pointer_found_in_space =
935 (allocation_info_.top == allocation_info_.limit);
936 PageIterator page_iterator(this);
937 while (page_iterator.has_next()) {
938 Page* page = page_iterator.next();
939 ASSERT(page->owner() == this);
940 if (page == Page::FromAllocationTop(allocation_info_.top)) {
941 allocation_pointer_found_in_space = true;
Steve Blocka7e24c12009-10-30 11:49:00 +0000942 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000943 ASSERT(page->WasSweptPrecisely());
944 HeapObjectIterator it(page, NULL);
945 Address end_of_previous_object = page->area_start();
946 Address top = page->area_end();
947 int black_size = 0;
948 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
949 ASSERT(end_of_previous_object <= object->address());
Steve Blocka7e24c12009-10-30 11:49:00 +0000950
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000951 // The first word should be a map, and we expect all map pointers to
952 // be in map space.
953 Map* map = object->map();
954 ASSERT(map->IsMap());
955 ASSERT(heap()->map_space()->Contains(map));
956
957 // Perform space-specific object verification.
958 VerifyObject(object);
959
960 // The object itself should look OK.
961 object->Verify();
962
963 // All the interior pointers should be contained in the heap.
964 int size = object->Size();
965 object->IterateBody(map->instance_type(), size, visitor);
966 if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
967 black_size += size;
968 }
969
970 ASSERT(object->address() + size <= top);
971 end_of_previous_object = object->address() + size;
972 }
973 ASSERT_LE(black_size, page->LiveBytes());
Steve Blocka7e24c12009-10-30 11:49:00 +0000974 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000975 ASSERT(allocation_pointer_found_in_space);
Steve Blocka7e24c12009-10-30 11:49:00 +0000976}
977#endif
978
979
980// -----------------------------------------------------------------------------
981// NewSpace implementation
982
983
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000984bool NewSpace::Setup(int reserved_semispace_capacity,
985 int maximum_semispace_capacity) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000986 // Setup new space based on the preallocated memory block defined by
987 // start and size. The provided space is divided into two semi-spaces.
988 // To support fast containment testing in the new space, the size of
989 // this chunk must be a power of two and it must be aligned to its size.
Steve Block44f0eee2011-05-26 01:26:41 +0100990 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000991
992 size_t size = 2 * reserved_semispace_capacity;
993 Address base =
994 heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
995 size, size, &reservation_);
996 if (base == NULL) return false;
997
998 chunk_base_ = base;
999 chunk_size_ = static_cast<uintptr_t>(size);
1000 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
Steve Blocka7e24c12009-10-30 11:49:00 +00001001
1002 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1003 ASSERT(IsPowerOf2(maximum_semispace_capacity));
1004
1005 // Allocate and setup the histogram arrays if necessary.
Steve Blocka7e24c12009-10-30 11:49:00 +00001006 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1007 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1008
1009#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1010 promoted_histogram_[name].set_name(#name);
1011 INSTANCE_TYPE_LIST(SET_NAME)
1012#undef SET_NAME
Steve Blocka7e24c12009-10-30 11:49:00 +00001013
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001014 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1015 ASSERT(static_cast<intptr_t>(chunk_size_) >=
1016 2 * heap()->ReservedSemiSpaceSize());
1017 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
Steve Blocka7e24c12009-10-30 11:49:00 +00001018
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001019 if (!to_space_.Setup(chunk_base_,
Steve Blocka7e24c12009-10-30 11:49:00 +00001020 initial_semispace_capacity,
1021 maximum_semispace_capacity)) {
1022 return false;
1023 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001024 if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +00001025 initial_semispace_capacity,
1026 maximum_semispace_capacity)) {
1027 return false;
1028 }
1029
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001030 start_ = chunk_base_;
1031 address_mask_ = ~(2 * reserved_semispace_capacity - 1);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001032 object_mask_ = address_mask_ | kHeapObjectTagMask;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001033 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
Steve Blocka7e24c12009-10-30 11:49:00 +00001034
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001035 ResetAllocationInfo();
Steve Blocka7e24c12009-10-30 11:49:00 +00001036
Steve Blocka7e24c12009-10-30 11:49:00 +00001037 return true;
1038}
1039
1040
1041void NewSpace::TearDown() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001042 if (allocated_histogram_) {
1043 DeleteArray(allocated_histogram_);
1044 allocated_histogram_ = NULL;
1045 }
1046 if (promoted_histogram_) {
1047 DeleteArray(promoted_histogram_);
1048 promoted_histogram_ = NULL;
1049 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001050
1051 start_ = NULL;
1052 allocation_info_.top = NULL;
1053 allocation_info_.limit = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001054
1055 to_space_.TearDown();
1056 from_space_.TearDown();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001057
1058 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1059
1060 ASSERT(reservation_.IsReserved());
1061 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1062 NOT_EXECUTABLE);
1063 chunk_base_ = NULL;
1064 chunk_size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001065}
1066
1067
Steve Blocka7e24c12009-10-30 11:49:00 +00001068void NewSpace::Flip() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001069 SemiSpace::Swap(&from_space_, &to_space_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001070}
1071
1072
1073void NewSpace::Grow() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001074 // Double the semispace size but only up to maximum capacity.
Steve Blocka7e24c12009-10-30 11:49:00 +00001075 ASSERT(Capacity() < MaximumCapacity());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001076 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1077 if (to_space_.GrowTo(new_capacity)) {
1078 // Only grow from space if we managed to grow to-space.
1079 if (!from_space_.GrowTo(new_capacity)) {
1080 // If we managed to grow to-space but couldn't grow from-space,
1081 // attempt to shrink to-space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001082 if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1083 // We are in an inconsistent state because we could not
1084 // commit/uncommit memory from new space.
1085 V8::FatalProcessOutOfMemory("Failed to grow new space.");
1086 }
1087 }
1088 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001089 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1090}
1091
1092
1093void NewSpace::Shrink() {
Ben Murdochf87a2032010-10-22 12:50:53 +01001094 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001095 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00001096 if (rounded_new_capacity < Capacity() &&
1097 to_space_.ShrinkTo(rounded_new_capacity)) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001098 // Only shrink from-space if we managed to shrink to-space.
1099 from_space_.Reset();
Steve Blocka7e24c12009-10-30 11:49:00 +00001100 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001101 // If we managed to shrink to-space but couldn't shrink from
1102 // space, attempt to grow to-space again.
Steve Blocka7e24c12009-10-30 11:49:00 +00001103 if (!to_space_.GrowTo(from_space_.Capacity())) {
1104 // We are in an inconsistent state because we could not
1105 // commit/uncommit memory from new space.
1106 V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1107 }
1108 }
1109 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001110 allocation_info_.limit = to_space_.page_high();
1111 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1112}
1113
1114
1115void NewSpace::UpdateAllocationInfo() {
1116 allocation_info_.top = to_space_.page_low();
1117 allocation_info_.limit = to_space_.page_high();
1118
1119 // Lower limit during incremental marking.
1120 if (heap()->incremental_marking()->IsMarking() &&
1121 inline_allocation_limit_step() != 0) {
1122 Address new_limit =
1123 allocation_info_.top + inline_allocation_limit_step();
1124 allocation_info_.limit = Min(new_limit, allocation_info_.limit);
1125 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001126 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1127}
1128
1129
1130void NewSpace::ResetAllocationInfo() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001131 to_space_.Reset();
1132 UpdateAllocationInfo();
1133 pages_used_ = 0;
1134 // Clear all mark-bits in the to-space.
1135 NewSpacePageIterator it(&to_space_);
1136 while (it.has_next()) {
1137 Bitmap::Clear(it.next());
1138 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001139}
1140
1141
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001142bool NewSpace::AddFreshPage() {
1143 Address top = allocation_info_.top;
1144 if (NewSpacePage::IsAtStart(top)) {
1145 // The current page is already empty. Don't try to make another.
1146
1147 // We should only get here if someone asks to allocate more
1148 // than what can be stored in a single page.
1149 // TODO(gc): Change the limit on new-space allocation to prevent this
1150 // from happening (all such allocations should go directly to LOSpace).
1151 return false;
1152 }
1153 if (!to_space_.AdvancePage()) {
1154 // Failed to get a new page in to-space.
1155 return false;
1156 }
1157
1158 // Clear remainder of current page.
1159 Address limit = NewSpacePage::FromLimit(top)->area_end();
1160 if (heap()->gc_state() == Heap::SCAVENGE) {
1161 heap()->promotion_queue()->SetNewLimit(limit);
1162 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
1163 }
1164
1165 int remaining_in_page = static_cast<int>(limit - top);
1166 heap()->CreateFillerObjectAt(top, remaining_in_page);
1167 pages_used_++;
1168 UpdateAllocationInfo();
1169
1170 return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00001171}
1172
1173
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001174MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
1175 Address old_top = allocation_info_.top;
1176 Address new_top = old_top + size_in_bytes;
1177 Address high = to_space_.page_high();
1178 if (allocation_info_.limit < high) {
1179 // Incremental marking has lowered the limit to get a
1180 // chance to do a step.
1181 allocation_info_.limit = Min(
1182 allocation_info_.limit + inline_allocation_limit_step_,
1183 high);
1184 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1185 heap()->incremental_marking()->Step(bytes_allocated);
1186 top_on_previous_step_ = new_top;
1187 return AllocateRaw(size_in_bytes);
1188 } else if (AddFreshPage()) {
1189 // Switched to new page. Try allocating again.
1190 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1191 heap()->incremental_marking()->Step(bytes_allocated);
1192 top_on_previous_step_ = to_space_.page_low();
1193 return AllocateRaw(size_in_bytes);
1194 } else {
1195 return Failure::RetryAfterGC();
1196 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001197}
1198
1199
1200#ifdef DEBUG
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001201// We do not use the SemiSpaceIterator because verification doesn't assume
Steve Blocka7e24c12009-10-30 11:49:00 +00001202// that it works (it depends on the invariants we are checking).
1203void NewSpace::Verify() {
1204 // The allocation pointer should be in the space or at the very end.
1205 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1206
1207 // There should be objects packed in from the low address up to the
1208 // allocation pointer.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001209 Address current = to_space_.first_page()->area_start();
1210 CHECK_EQ(current, to_space_.space_start());
Steve Blocka7e24c12009-10-30 11:49:00 +00001211
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001212 while (current != top()) {
1213 if (!NewSpacePage::IsAtEnd(current)) {
1214 // The allocation pointer should not be in the middle of an object.
1215 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1216 current < top());
Steve Blocka7e24c12009-10-30 11:49:00 +00001217
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001218 HeapObject* object = HeapObject::FromAddress(current);
Steve Blocka7e24c12009-10-30 11:49:00 +00001219
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001220 // The first word should be a map, and we expect all map pointers to
1221 // be in map space.
1222 Map* map = object->map();
1223 CHECK(map->IsMap());
1224 CHECK(heap()->map_space()->Contains(map));
Steve Blocka7e24c12009-10-30 11:49:00 +00001225
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001226 // The object should not be code or a map.
1227 CHECK(!object->IsMap());
1228 CHECK(!object->IsCode());
Steve Blocka7e24c12009-10-30 11:49:00 +00001229
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001230 // The object itself should look OK.
1231 object->Verify();
1232
1233 // All the interior pointers should be contained in the heap.
1234 VerifyPointersVisitor visitor;
1235 int size = object->Size();
1236 object->IterateBody(map->instance_type(), size, &visitor);
1237
1238 current += size;
1239 } else {
1240 // At end of page, switch to next page.
1241 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1242 // Next page should be valid.
1243 CHECK(!page->is_anchor());
1244 current = page->area_start();
1245 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001246 }
1247
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001248 // Check semi-spaces.
1249 ASSERT_EQ(from_space_.id(), kFromSpace);
1250 ASSERT_EQ(to_space_.id(), kToSpace);
1251 from_space_.Verify();
1252 to_space_.Verify();
Steve Blocka7e24c12009-10-30 11:49:00 +00001253}
1254#endif
1255
Steve Blocka7e24c12009-10-30 11:49:00 +00001256// -----------------------------------------------------------------------------
1257// SemiSpace implementation
1258
1259bool SemiSpace::Setup(Address start,
1260 int initial_capacity,
1261 int maximum_capacity) {
1262 // Creates a space in the young generation. The constructor does not
1263 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1264 // memory of size 'capacity' when set up, and does not grow or shrink
1265 // otherwise. In the mark-compact collector, the memory region of the from
1266 // space is used as the marking stack. It requires contiguous memory
1267 // addresses.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001268 ASSERT(maximum_capacity >= Page::kPageSize);
1269 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00001270 capacity_ = initial_capacity;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001271 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00001272 committed_ = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001273 start_ = start;
1274 address_mask_ = ~(maximum_capacity - 1);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001275 object_mask_ = address_mask_ | kHeapObjectTagMask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001276 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1277 age_mark_ = start_;
1278
1279 return Commit();
1280}
1281
1282
1283void SemiSpace::TearDown() {
1284 start_ = NULL;
1285 capacity_ = 0;
1286}
1287
1288
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001289bool SemiSpace::Commit() {
1290 ASSERT(!is_committed());
1291 int pages = capacity_ / Page::kPageSize;
1292 Address end = start_ + maximum_capacity_;
1293 Address start = end - pages * Page::kPageSize;
1294 if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
1295 capacity_,
1296 executable())) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001297 return false;
1298 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001299
1300 NewSpacePage* page = anchor();
1301 for (int i = 1; i <= pages; i++) {
1302 NewSpacePage* new_page =
1303 NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
1304 new_page->InsertAfter(page);
1305 page = new_page;
1306 }
1307
1308 committed_ = true;
1309 Reset();
1310 return true;
1311}
1312
1313
1314bool SemiSpace::Uncommit() {
1315 ASSERT(is_committed());
1316 Address start = start_ + maximum_capacity_ - capacity_;
1317 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1318 return false;
1319 }
1320 anchor()->set_next_page(anchor());
1321 anchor()->set_prev_page(anchor());
1322
1323 committed_ = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001324 return true;
1325}
1326
1327
1328bool SemiSpace::GrowTo(int new_capacity) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001329 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001330 ASSERT(new_capacity <= maximum_capacity_);
1331 ASSERT(new_capacity > capacity_);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001332 int pages_before = capacity_ / Page::kPageSize;
1333 int pages_after = new_capacity / Page::kPageSize;
1334
1335 Address end = start_ + maximum_capacity_;
1336 Address start = end - new_capacity;
Steve Blocka7e24c12009-10-30 11:49:00 +00001337 size_t delta = new_capacity - capacity_;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001338
Steve Blocka7e24c12009-10-30 11:49:00 +00001339 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
Steve Block44f0eee2011-05-26 01:26:41 +01001340 if (!heap()->isolate()->memory_allocator()->CommitBlock(
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001341 start, delta, executable())) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001342 return false;
1343 }
1344 capacity_ = new_capacity;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001345 NewSpacePage* last_page = anchor()->prev_page();
1346 ASSERT(last_page != anchor());
1347 for (int i = pages_before + 1; i <= pages_after; i++) {
1348 Address page_address = end - i * Page::kPageSize;
1349 NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1350 page_address,
1351 this);
1352 new_page->InsertAfter(last_page);
1353 Bitmap::Clear(new_page);
1354 // Duplicate the flags that was set on the old page.
1355 new_page->SetFlags(last_page->GetFlags(),
1356 NewSpacePage::kCopyOnFlipFlagsMask);
1357 last_page = new_page;
1358 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001359 return true;
1360}
1361
1362
1363bool SemiSpace::ShrinkTo(int new_capacity) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001364 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001365 ASSERT(new_capacity >= initial_capacity_);
1366 ASSERT(new_capacity < capacity_);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001367 // Semispaces grow backwards from the end of their allocated capacity,
1368 // so we find the before and after start addresses relative to the
1369 // end of the space.
1370 Address space_end = start_ + maximum_capacity_;
1371 Address old_start = space_end - capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001372 size_t delta = capacity_ - new_capacity;
1373 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001374 if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001375 return false;
1376 }
1377 capacity_ = new_capacity;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001378
1379 int pages_after = capacity_ / Page::kPageSize;
1380 NewSpacePage* new_last_page =
1381 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
1382 new_last_page->set_next_page(anchor());
1383 anchor()->set_prev_page(new_last_page);
1384 ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
1385
Steve Blocka7e24c12009-10-30 11:49:00 +00001386 return true;
1387}
1388
1389
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001390void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1391 anchor_.set_owner(this);
1392 // Fixup back-pointers to anchor. Address of anchor changes
1393 // when we swap.
1394 anchor_.prev_page()->set_next_page(&anchor_);
1395 anchor_.next_page()->set_prev_page(&anchor_);
1396
1397 bool becomes_to_space = (id_ == kFromSpace);
1398 id_ = becomes_to_space ? kToSpace : kFromSpace;
1399 NewSpacePage* page = anchor_.next_page();
1400 while (page != &anchor_) {
1401 page->set_owner(this);
1402 page->SetFlags(flags, mask);
1403 if (becomes_to_space) {
1404 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1405 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1406 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1407 page->ResetLiveBytes();
1408 } else {
1409 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1410 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1411 }
1412 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1413 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1414 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1415 page = page->next_page();
1416 }
1417}
1418
1419
1420void SemiSpace::Reset() {
1421 ASSERT(anchor_.next_page() != &anchor_);
1422 current_page_ = anchor_.next_page();
1423}
1424
1425
1426void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1427 // We won't be swapping semispaces without data in them.
1428 ASSERT(from->anchor_.next_page() != &from->anchor_);
1429 ASSERT(to->anchor_.next_page() != &to->anchor_);
1430
1431 // Swap bits.
1432 SemiSpace tmp = *from;
1433 *from = *to;
1434 *to = tmp;
1435
1436 // Fixup back-pointers to the page list anchor now that its address
1437 // has changed.
1438 // Swap to/from-space bits on pages.
1439 // Copy GC flags from old active space (from-space) to new (to-space).
1440 intptr_t flags = from->current_page()->GetFlags();
1441 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1442
1443 from->FlipPages(0, 0);
1444}
1445
1446
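// Records the age mark and flags every page from the start of the space up
// to the page containing 'mark' as being below the age mark.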
1447void SemiSpace::set_age_mark(Address mark) {
1448 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1449 age_mark_ = mark;
1450 // Mark all pages up to the one containing mark.
1451 NewSpacePageIterator it(space_start(), mark);
1452 while (it.has_next()) {
1453 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1454 }
1455}
1456
1457
Steve Blocka7e24c12009-10-30 11:49:00 +00001458#ifdef DEBUG
1459void SemiSpace::Print() { }
1460
1461
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001462void SemiSpace::Verify() {
1463 bool is_from_space = (id_ == kFromSpace);
1464 NewSpacePage* page = anchor_.next_page();
1465 CHECK(anchor_.semi_space() == this);
1466 while (page != &anchor_) {
1467 CHECK(page->semi_space() == this);
1468 CHECK(page->InNewSpace());
1469 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1470 : MemoryChunk::IN_TO_SPACE));
1471 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1472 : MemoryChunk::IN_FROM_SPACE));
1473 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1474 if (!is_from_space) {
1475 // The pointers-from-here-are-interesting flag isn't updated dynamically
1476 // on from-space pages, so it might be out of sync with the marking state.
1477 if (page->heap()->incremental_marking()->IsMarking()) {
1478 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1479 } else {
1480 CHECK(!page->IsFlagSet(
1481 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1482 }
1483 // TODO(gc): Check that the live_bytes_count_ field matches the
1484 // black marking on the page (if we make it match in new-space).
1485 }
1486 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1487 CHECK(page->prev_page()->next_page() == page);
1488 page = page->next_page();
1489 }
1490}
1491
1492
1493void SemiSpace::AssertValidRange(Address start, Address end) {
1494  // Addresses must belong to the same semi-space.
1495 NewSpacePage* page = NewSpacePage::FromLimit(start);
1496 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1497 SemiSpace* space = page->semi_space();
1498 CHECK_EQ(space, end_page->semi_space());
1499  // The start address must not come after the end address: either both are
1500  // on the same page, or the end address is on a later page in the linked
1501  // list of semi-space pages.
1502 if (page == end_page) {
1503 CHECK(start <= end);
1504 } else {
1505 while (page != end_page) {
1506 page = page->next_page();
1507 CHECK_NE(page, space->anchor());
1508 }
1509 }
1510}
Steve Blocka7e24c12009-10-30 11:49:00 +00001511#endif
1512
1513
1514// -----------------------------------------------------------------------------
1515// SemiSpaceIterator implementation.
1516SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001517 Initialize(space->bottom(), space->top(), NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001518}
1519
1520
1521SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1522 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001523 Initialize(space->bottom(), space->top(), size_func);
Steve Blocka7e24c12009-10-30 11:49:00 +00001524}
1525
1526
1527SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001528 Initialize(start, space->top(), NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001529}
1530
1531
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001532SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1533 Initialize(from, to, NULL);
1534}
1535
1536
1537void SemiSpaceIterator::Initialize(Address start,
Steve Blocka7e24c12009-10-30 11:49:00 +00001538 Address end,
1539 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001540 SemiSpace::AssertValidRange(start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00001541 current_ = start;
1542 limit_ = end;
1543 size_func_ = size_func;
1544}
1545
1546
1547#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00001548// heap_histograms is shared, always clear it before using it.
1549static void ClearHistograms() {
Steve Block44f0eee2011-05-26 01:26:41 +01001550 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001551 // We reset the name each time, though it hasn't changed.
Steve Block44f0eee2011-05-26 01:26:41 +01001552#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
Steve Blocka7e24c12009-10-30 11:49:00 +00001553 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1554#undef DEF_TYPE_NAME
1555
Steve Block44f0eee2011-05-26 01:26:41 +01001556#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00001557 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1558#undef CLEAR_HISTOGRAM
1559
Steve Block44f0eee2011-05-26 01:26:41 +01001560 isolate->js_spill_information()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00001561}
1562
1563
Steve Blocka7e24c12009-10-30 11:49:00 +00001564static void ClearCodeKindStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01001565 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001566 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001567 isolate->code_kind_statistics()[i] = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001568 }
1569}
1570
1571
1572static void ReportCodeKindStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01001573 Isolate* isolate = Isolate::Current();
Steve Block6ded16b2010-05-10 14:33:55 +01001574 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
Steve Blocka7e24c12009-10-30 11:49:00 +00001575
1576#define CASE(name) \
1577 case Code::name: table[Code::name] = #name; \
1578 break
1579
1580 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1581 switch (static_cast<Code::Kind>(i)) {
1582 CASE(FUNCTION);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001583 CASE(OPTIMIZED_FUNCTION);
Steve Blocka7e24c12009-10-30 11:49:00 +00001584 CASE(STUB);
1585 CASE(BUILTIN);
1586 CASE(LOAD_IC);
1587 CASE(KEYED_LOAD_IC);
1588 CASE(STORE_IC);
1589 CASE(KEYED_STORE_IC);
1590 CASE(CALL_IC);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001591 CASE(KEYED_CALL_IC);
Ben Murdoch257744e2011-11-30 15:57:28 +00001592 CASE(UNARY_OP_IC);
1593 CASE(BINARY_OP_IC);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001594 CASE(COMPARE_IC);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001595 CASE(TO_BOOLEAN_IC);
Steve Blocka7e24c12009-10-30 11:49:00 +00001596 }
1597 }
1598
1599#undef CASE
1600
1601 PrintF("\n Code kind histograms: \n");
1602 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001603 if (isolate->code_kind_statistics()[i] > 0) {
1604 PrintF(" %-20s: %10d bytes\n", table[i],
1605 isolate->code_kind_statistics()[i]);
Steve Blocka7e24c12009-10-30 11:49:00 +00001606 }
1607 }
1608 PrintF("\n");
1609}
1610
1611
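// Records 'obj' in the shared instance-type histograms (and, if
// FLAG_collect_heap_spill_statistics is set, in the JS spill statistics)
// and returns its size in bytes.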
1612static int CollectHistogramInfo(HeapObject* obj) {
Steve Block44f0eee2011-05-26 01:26:41 +01001613 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001614 InstanceType type = obj->map()->instance_type();
1615 ASSERT(0 <= type && type <= LAST_TYPE);
Steve Block44f0eee2011-05-26 01:26:41 +01001616 ASSERT(isolate->heap_histograms()[type].name() != NULL);
1617 isolate->heap_histograms()[type].increment_number(1);
1618 isolate->heap_histograms()[type].increment_bytes(obj->Size());
Steve Blocka7e24c12009-10-30 11:49:00 +00001619
1620 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
Steve Block44f0eee2011-05-26 01:26:41 +01001621 JSObject::cast(obj)->IncrementSpillStatistics(
1622 isolate->js_spill_information());
Steve Blocka7e24c12009-10-30 11:49:00 +00001623 }
1624
1625 return obj->Size();
1626}
1627
1628
1629static void ReportHistogram(bool print_spill) {
Steve Block44f0eee2011-05-26 01:26:41 +01001630 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001631 PrintF("\n Object Histogram:\n");
1632 for (int i = 0; i <= LAST_TYPE; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001633 if (isolate->heap_histograms()[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001634 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Block44f0eee2011-05-26 01:26:41 +01001635 isolate->heap_histograms()[i].name(),
1636 isolate->heap_histograms()[i].number(),
1637 isolate->heap_histograms()[i].bytes());
Steve Blocka7e24c12009-10-30 11:49:00 +00001638 }
1639 }
1640 PrintF("\n");
1641
1642 // Summarize string types.
1643 int string_number = 0;
1644 int string_bytes = 0;
1645#define INCREMENT(type, size, name, camel_name) \
Steve Block44f0eee2011-05-26 01:26:41 +01001646 string_number += isolate->heap_histograms()[type].number(); \
1647 string_bytes += isolate->heap_histograms()[type].bytes();
Steve Blocka7e24c12009-10-30 11:49:00 +00001648 STRING_TYPE_LIST(INCREMENT)
1649#undef INCREMENT
1650 if (string_number > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001651 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
Steve Blocka7e24c12009-10-30 11:49:00 +00001652 string_bytes);
1653 }
1654
1655 if (FLAG_collect_heap_spill_statistics && print_spill) {
Steve Block44f0eee2011-05-26 01:26:41 +01001656 isolate->js_spill_information()->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00001657 }
1658}
1659#endif // DEBUG
1660
1661
1662// Support for statistics gathering for --heap-stats and --log-gc.
Steve Blocka7e24c12009-10-30 11:49:00 +00001663void NewSpace::ClearHistograms() {
1664 for (int i = 0; i <= LAST_TYPE; i++) {
1665 allocated_histogram_[i].clear();
1666 promoted_histogram_[i].clear();
1667 }
1668}
1669
1670// Because the copying collector does not touch garbage objects, we iterate
1671// the new space before a collection to get a histogram of allocated objects.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001672// This only happens when the --log-gc flag is set.
Steve Blocka7e24c12009-10-30 11:49:00 +00001673void NewSpace::CollectStatistics() {
1674 ClearHistograms();
1675 SemiSpaceIterator it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001676 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
Leon Clarked91b9f72010-01-27 17:25:45 +00001677 RecordAllocation(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001678}
1679
1680
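// Logs one heap sample: all string types are lumped into a single
// STRING_TYPE item, followed by one item for every other instance type
// with a non-zero count.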
Steve Block44f0eee2011-05-26 01:26:41 +01001681static void DoReportStatistics(Isolate* isolate,
1682 HistogramInfo* info, const char* description) {
1683 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
Steve Blocka7e24c12009-10-30 11:49:00 +00001684 // Lump all the string types together.
1685 int string_number = 0;
1686 int string_bytes = 0;
1687#define INCREMENT(type, size, name, camel_name) \
1688 string_number += info[type].number(); \
1689 string_bytes += info[type].bytes();
1690 STRING_TYPE_LIST(INCREMENT)
1691#undef INCREMENT
1692 if (string_number > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01001693 LOG(isolate,
1694 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00001695 }
1696
1697 // Then do the other types.
1698 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1699 if (info[i].number() > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01001700 LOG(isolate,
1701 HeapSampleItemEvent(info[i].name(), info[i].number(),
Steve Blocka7e24c12009-10-30 11:49:00 +00001702 info[i].bytes()));
1703 }
1704 }
Steve Block44f0eee2011-05-26 01:26:41 +01001705 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
Steve Blocka7e24c12009-10-30 11:49:00 +00001706}
Steve Blocka7e24c12009-10-30 11:49:00 +00001707
1708
1709void NewSpace::ReportStatistics() {
1710#ifdef DEBUG
1711 if (FLAG_heap_stats) {
1712 float pct = static_cast<float>(Available()) / Capacity();
Ben Murdochf87a2032010-10-22 12:50:53 +01001713 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1714 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001715 Capacity(), Available(), static_cast<int>(pct*100));
1716 PrintF("\n Object Histogram:\n");
1717 for (int i = 0; i <= LAST_TYPE; i++) {
1718 if (allocated_histogram_[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001719 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001720 allocated_histogram_[i].name(),
1721 allocated_histogram_[i].number(),
1722 allocated_histogram_[i].bytes());
1723 }
1724 }
1725 PrintF("\n");
1726 }
1727#endif // DEBUG
1728
Steve Blocka7e24c12009-10-30 11:49:00 +00001729 if (FLAG_log_gc) {
Steve Block44f0eee2011-05-26 01:26:41 +01001730 Isolate* isolate = ISOLATE;
1731 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1732 DoReportStatistics(isolate, promoted_histogram_, "promoted");
Steve Blocka7e24c12009-10-30 11:49:00 +00001733 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001734}
1735
1736
1737void NewSpace::RecordAllocation(HeapObject* obj) {
1738 InstanceType type = obj->map()->instance_type();
1739 ASSERT(0 <= type && type <= LAST_TYPE);
1740 allocated_histogram_[type].increment_number(1);
1741 allocated_histogram_[type].increment_bytes(obj->Size());
1742}
1743
1744
1745void NewSpace::RecordPromotion(HeapObject* obj) {
1746 InstanceType type = obj->map()->instance_type();
1747 ASSERT(0 <= type && type <= LAST_TYPE);
1748 promoted_histogram_[type].increment_number(1);
1749 promoted_histogram_[type].increment_bytes(obj->Size());
1750}
Steve Blocka7e24c12009-10-30 11:49:00 +00001751
Steve Blocka7e24c12009-10-30 11:49:00 +00001752// -----------------------------------------------------------------------------
1753// Free lists for old object spaces implementation
1754
Steve Block44f0eee2011-05-26 01:26:41 +01001755void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001756 ASSERT(size_in_bytes > 0);
1757 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1758
1759 // We write a map and possibly size information to the block. If the block
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001760 // is big enough to be a FreeSpace with at least one extra word (the next
1761 // pointer), we set its map to be the free space map and its size to an
Steve Blocka7e24c12009-10-30 11:49:00 +00001762 // appropriate array length for the desired size from HeapObject::Size().
1763 // If the block is too small (eg, one or two words), to hold both a size
1764 // field and a next pointer, we give it a filler map that gives it the
1765 // correct size.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001766 if (size_in_bytes > FreeSpace::kHeaderSize) {
1767 set_map_unsafe(heap->raw_unchecked_free_space_map());
1768 // Can't use FreeSpace::cast because it fails during deserialization.
1769 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1770 this_as_free_space->set_size(size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001771 } else if (size_in_bytes == kPointerSize) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001772 set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00001773 } else if (size_in_bytes == 2 * kPointerSize) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001774 set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00001775 } else {
1776 UNREACHABLE();
1777 }
Steve Blockd0582a62009-12-15 09:54:21 +00001778 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001779 // deserialization because the free space map is not done yet.
Steve Blocka7e24c12009-10-30 11:49:00 +00001780}
1781
1782
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001783FreeListNode* FreeListNode::next() {
Steve Block3ce2e202009-11-05 08:53:23 +00001784 ASSERT(IsFreeListNode(this));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001785 if (map() == HEAP->raw_unchecked_free_space_map()) {
1786 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1787 return reinterpret_cast<FreeListNode*>(
1788 Memory::Address_at(address() + kNextOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00001789 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001790 return reinterpret_cast<FreeListNode*>(
1791 Memory::Address_at(address() + kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00001792 }
1793}
1794
1795
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001796FreeListNode** FreeListNode::next_address() {
Steve Block3ce2e202009-11-05 08:53:23 +00001797 ASSERT(IsFreeListNode(this));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001798 if (map() == HEAP->raw_unchecked_free_space_map()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001799 ASSERT(Size() >= kNextOffset + kPointerSize);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001800 return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
Steve Blocka7e24c12009-10-30 11:49:00 +00001801 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001802 return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00001803 }
1804}
1805
1806
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001807void FreeListNode::set_next(FreeListNode* next) {
1808 ASSERT(IsFreeListNode(this));
1809 // While we are booting the VM the free space map will actually be null. So
1810 // we have to make sure that we don't try to use it for anything at that
1811 // stage.
1812 if (map() == HEAP->raw_unchecked_free_space_map()) {
1813 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1814 Memory::Address_at(address() + kNextOffset) =
1815 reinterpret_cast<Address>(next);
1816 } else {
1817 Memory::Address_at(address() + kPointerSize) =
1818 reinterpret_cast<Address>(next);
1819 }
1820}
1821
1822
1823FreeList::FreeList(PagedSpace* owner)
1824 : owner_(owner), heap_(owner->heap()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001825 Reset();
1826}
1827
1828
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001829void FreeList::Reset() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001830 available_ = 0;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001831 small_list_ = NULL;
1832 medium_list_ = NULL;
1833 large_list_ = NULL;
1834 huge_list_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001835}
1836
1837
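// Returns a block of memory to the free list. Blocks smaller than
// kSmallListMin are not linked into any list and their size is returned to
// the caller; larger blocks become the head of the small, medium, large or
// huge list, chosen by size, and 0 is returned.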
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001838int FreeList::Free(Address start, int size_in_bytes) {
1839 if (size_in_bytes == 0) return 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001840 FreeListNode* node = FreeListNode::FromAddress(start);
Steve Block44f0eee2011-05-26 01:26:41 +01001841 node->set_size(heap_, size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001842
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001843 // Early return to drop too-small blocks on the floor.
1844 if (size_in_bytes < kSmallListMin) return size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +00001845
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001846 // Insert other blocks at the head of a free list of the appropriate
1847 // magnitude.
1848 if (size_in_bytes <= kSmallListMax) {
1849 node->set_next(small_list_);
1850 small_list_ = node;
1851 } else if (size_in_bytes <= kMediumListMax) {
1852 node->set_next(medium_list_);
1853 medium_list_ = node;
1854 } else if (size_in_bytes <= kLargeListMax) {
1855 node->set_next(large_list_);
1856 large_list_ = node;
1857 } else {
1858 node->set_next(huge_list_);
1859 huge_list_ = node;
Steve Blocka7e24c12009-10-30 11:49:00 +00001860 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001861 available_ += size_in_bytes;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001862 ASSERT(IsVeryLong() || available_ == SumFreeLists());
Steve Blocka7e24c12009-10-30 11:49:00 +00001863 return 0;
1864}
1865
1866
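// Takes the first usable node off '*list'. Nodes on evacuation candidate
// pages are skipped (and their bytes deducted from available_); the list
// head is advanced past every node that was skipped or returned.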
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001867FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1868 FreeListNode* node = *list;
Steve Blocka7e24c12009-10-30 11:49:00 +00001869
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001870 if (node == NULL) return NULL;
1871
1872 while (node != NULL &&
1873 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1874 available_ -= node->Size();
1875 node = node->next();
Steve Blocka7e24c12009-10-30 11:49:00 +00001876 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001877
1878 if (node != NULL) {
1879 *node_size = node->Size();
1880 *list = node->next();
Steve Blocka7e24c12009-10-30 11:49:00 +00001881 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001882 *list = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001883 }
1884
Steve Blocka7e24c12009-10-30 11:49:00 +00001885 return node;
1886}
1887
1888
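// Finds a free-list node of at least 'size_in_bytes' bytes. The small,
// medium and large lists are tried first (each serves requests up to
// kSmallAllocationMax, kMediumAllocationMax and kLargeAllocationMax
// respectively); otherwise the huge list is scanned for the first node that
// is big enough. Returns NULL if no suitable node is found.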
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001889FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1890 FreeListNode* node = NULL;
1891
1892 if (size_in_bytes <= kSmallAllocationMax) {
1893 node = PickNodeFromList(&small_list_, node_size);
1894 if (node != NULL) return node;
1895 }
1896
1897 if (size_in_bytes <= kMediumAllocationMax) {
1898 node = PickNodeFromList(&medium_list_, node_size);
1899 if (node != NULL) return node;
1900 }
1901
1902 if (size_in_bytes <= kLargeAllocationMax) {
1903 node = PickNodeFromList(&large_list_, node_size);
1904 if (node != NULL) return node;
1905 }
1906
1907 for (FreeListNode** cur = &huge_list_;
1908 *cur != NULL;
1909 cur = (*cur)->next_address()) {
1910 FreeListNode* cur_node = *cur;
1911 while (cur_node != NULL &&
1912 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1913 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1914 cur_node = cur_node->next();
1915 }
1916
1917 *cur = cur_node;
1918 if (cur_node == NULL) break;
1919
1920 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1921 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1922 int size = cur_as_free_space->Size();
1923 if (size >= size_in_bytes) {
1924 // Large enough node found. Unlink it from the list.
1925 node = *cur;
1926 *node_size = size;
1927 *cur = node->next();
1928 break;
1929 }
1930 }
1931
1932 return node;
1933}
1934
1935
1936// Allocation on the old space free list. If it succeeds then a new linear
1937// allocation space has been set up with the top and limit of the space. If
1938// the allocation fails then NULL is returned, and the caller can perform a GC
1939// or allocate a new page before retrying.
1940HeapObject* FreeList::Allocate(int size_in_bytes) {
1941 ASSERT(0 < size_in_bytes);
1942 ASSERT(size_in_bytes <= kMaxBlockSize);
1943 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1944 // Don't free list allocate if there is linear space available.
1945 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1946
1947 int new_node_size = 0;
1948 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
1949 if (new_node == NULL) return NULL;
1950
1951 available_ -= new_node_size;
1952 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1953
1954 int bytes_left = new_node_size - size_in_bytes;
1955 ASSERT(bytes_left >= 0);
1956
1957 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
1958 // Mark the old linear allocation area with a free space map so it can be
1959 // skipped when scanning the heap. This also puts it back in the free list
1960 // if it is big enough.
1961 owner_->Free(owner_->top(), old_linear_size);
1962
1963#ifdef DEBUG
1964 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
1965 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
1966 }
1967#endif
1968
1969 owner_->heap()->incremental_marking()->OldSpaceStep(
1970 size_in_bytes - old_linear_size);
1971
1972 // The old-space-step might have finished sweeping and restarted marking.
1973 // Verify that it did not turn the page of the new node into an evacuation
1974 // candidate.
1975 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
1976
1977 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
1978
1979 // Memory in the linear allocation area is counted as allocated. We may free
1980 // a little of this again immediately - see below.
1981 owner_->Allocate(new_node_size);
1982
1983 if (bytes_left > kThreshold &&
1984 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
1985 FLAG_incremental_marking_steps) {
1986 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
1987 // We don't want to give too large linear areas to the allocator while
1988 // incremental marking is going on, because we won't check again whether
1989 // we want to do another increment until the linear area is used up.
1990 owner_->Free(new_node->address() + size_in_bytes + linear_size,
1991 new_node_size - size_in_bytes - linear_size);
1992 owner_->SetTop(new_node->address() + size_in_bytes,
1993 new_node->address() + size_in_bytes + linear_size);
1994 } else if (bytes_left > 0) {
1995 // Normally we give the rest of the node to the allocator as its new
1996 // linear allocation area.
1997 owner_->SetTop(new_node->address() + size_in_bytes,
1998 new_node->address() + new_node_size);
1999 } else {
2000 // TODO(gc) Try not freeing linear allocation region when bytes_left
2001 // are zero.
2002 owner_->SetTop(NULL, NULL);
2003 }
2004
2005 return new_node;
2006}
2007
2008
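// Sums the sizes of the free-list nodes in 'n' that live on page 'p'.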
2009static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
2010 intptr_t sum = 0;
2011 while (n != NULL) {
2012 if (Page::FromAddress(n->address()) == p) {
2013 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
2014 sum += free_space->Size();
2015 }
2016 n = n->next();
2017 }
2018 return sum;
2019}
2020
2021
2022void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
2023 sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
2024 if (sizes->huge_size_ < p->area_size()) {
2025 sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
2026 sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
2027 sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
2028 } else {
2029 sizes->small_size_ = 0;
2030 sizes->medium_size_ = 0;
2031 sizes->large_size_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002032 }
2033}
2034
2035
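// Unlinks every free-list node that lives on page 'p' from the list and
// returns the total number of bytes removed.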
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002036static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
2037 intptr_t sum = 0;
2038 while (*n != NULL) {
2039 if (Page::FromAddress((*n)->address()) == p) {
2040 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2041 sum += free_space->Size();
2042 *n = (*n)->next();
2043 } else {
2044 n = (*n)->next_address();
2045 }
2046 }
2047 return sum;
2048}
2049
2050
2051intptr_t FreeList::EvictFreeListItems(Page* p) {
2052 intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
2053
2054 if (sum < p->area_size()) {
2055 sum += EvictFreeListItemsInList(&small_list_, p) +
2056 EvictFreeListItemsInList(&medium_list_, p) +
2057 EvictFreeListItemsInList(&large_list_, p);
2058 }
2059
2060 available_ -= static_cast<int>(sum);
2061
2062 return sum;
2063}
2064
2065
2066#ifdef DEBUG
2067intptr_t FreeList::SumFreeList(FreeListNode* cur) {
2068 intptr_t sum = 0;
2069 while (cur != NULL) {
2070 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
2071 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2072 sum += cur_as_free_space->Size();
2073 cur = cur->next();
2074 }
2075 return sum;
2076}
2077
2078
2079static const int kVeryLongFreeList = 500;
2080
2081
2082int FreeList::FreeListLength(FreeListNode* cur) {
2083 int length = 0;
2084 while (cur != NULL) {
2085 length++;
2086 cur = cur->next();
2087 if (length == kVeryLongFreeList) return length;
2088 }
2089 return length;
2090}
2091
2092
2093bool FreeList::IsVeryLong() {
2094 if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
2095 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
2096 if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
2097 if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
2098 return false;
2099}
2100
2101
2102// This can take a very long time because it is linear in the number of entries
2103// on the free list, so it should not be called if FreeListLength returns
2104// kVeryLongFreeList.
2105intptr_t FreeList::SumFreeLists() {
2106 intptr_t sum = SumFreeList(small_list_);
2107 sum += SumFreeList(medium_list_);
2108 sum += SumFreeList(large_list_);
2109 sum += SumFreeList(huge_list_);
2110 return sum;
2111}
2112#endif
2113
2114
Steve Blocka7e24c12009-10-30 11:49:00 +00002115// -----------------------------------------------------------------------------
2116// OldSpace implementation
2117
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002118bool NewSpace::ReserveSpace(int bytes) {
2119 // We can't reliably unpack a partial snapshot that needs more new space
2120  // than the minimum NewSpace size. The limit can be set lower than
2121 // the end of new space either because there is more space on the next page
2122 // or because we have lowered the limit in order to get periodic incremental
2123 // marking. The most reliable way to ensure that there is linear space is
2124 // to do the allocation, then rewind the limit.
2125 ASSERT(bytes <= InitialCapacity());
2126 MaybeObject* maybe = AllocateRaw(bytes);
2127 Object* object = NULL;
2128 if (!maybe->ToObject(&object)) return false;
2129 HeapObject* allocation = HeapObject::cast(object);
2130 Address top = allocation_info_.top;
2131 if ((top - bytes) == allocation->address()) {
2132 allocation_info_.top = allocation->address();
2133 return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00002134 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002135 // There may be a borderline case here where the allocation succeeded, but
2136 // the limit and top have moved on to a new page. In that case we try again.
2137 return ReserveSpace(bytes);
2138}
2139
2140
2141void PagedSpace::PrepareForMarkCompact() {
2142 // We don't have a linear allocation area while sweeping. It will be restored
2143 // on the first allocation after the sweep.
2144 // Mark the old linear allocation area with a free space map so it can be
2145 // skipped when scanning the heap.
2146 int old_linear_size = static_cast<int>(limit() - top());
2147 Free(top(), old_linear_size);
2148 SetTop(NULL, NULL);
2149
2150 // Stop lazy sweeping and clear marking bits for unswept pages.
2151 if (first_unswept_page_ != NULL) {
2152 Page* p = first_unswept_page_;
2153 do {
2154 // Do not use ShouldBeSweptLazily predicate here.
2155 // New evacuation candidates were selected but they still have
2156 // to be swept before collection starts.
2157 if (!p->WasSwept()) {
2158 Bitmap::Clear(p);
2159 if (FLAG_gc_verbose) {
2160 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
2161 reinterpret_cast<intptr_t>(p));
2162 }
2163 }
2164 p = p->next_page();
2165 } while (p != anchor());
2166 }
2167 first_unswept_page_ = Page::FromAddress(NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00002168
2169 // Clear the free list before a full GC---it will be rebuilt afterward.
2170 free_list_.Reset();
2171}
2172
2173
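// Ensures that 'size_in_bytes' of linear allocation space is available. If
// the current linear area is too small, a new area is obtained from the free
// list (or via SlowAllocateRaw), the old linear area is retired through
// Free(), and top/limit are moved to the new area.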
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002174bool PagedSpace::ReserveSpace(int size_in_bytes) {
2175 ASSERT(size_in_bytes <= AreaSize());
2176 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
2177 Address current_top = allocation_info_.top;
2178 Address new_top = current_top + size_in_bytes;
2179 if (new_top <= allocation_info_.limit) return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00002180
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002181 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2182 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2183 if (new_area == NULL) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002184
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002185 int old_linear_size = static_cast<int>(limit() - top());
2186  // Mark the old linear allocation area with a free space map so it can be
2187 // skipped when scanning the heap. This also puts it back in the free list
2188 // if it is big enough.
2189 Free(top(), old_linear_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002190
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002191 SetTop(new_area->address(), new_area->address() + size_in_bytes);
2192 Allocate(size_in_bytes);
Leon Clarkee46be812010-01-19 14:06:41 +00002193 return true;
2194}
2195
2196
2197// You have to call this last, since the implementation from PagedSpace
2198// doesn't know that memory was 'promised' to large object space.
2199bool LargeObjectSpace::ReserveSpace(int bytes) {
Steve Block44f0eee2011-05-26 01:26:41 +01002200 return heap()->OldGenerationSpaceAvailable() >= bytes;
Leon Clarkee46be812010-01-19 14:06:41 +00002201}
2202
2203
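// Conservatively sweeps pages, starting at first_unswept_page_, until at
// least 'bytes_to_sweep' bytes have been freed or all pages are swept.
// Returns true if sweeping of this space is now complete.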
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002204bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2205 if (IsSweepingComplete()) return true;
Steve Blocka7e24c12009-10-30 11:49:00 +00002206
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002207 intptr_t freed_bytes = 0;
2208 Page* p = first_unswept_page_;
2209 do {
2210 Page* next_page = p->next_page();
2211 if (ShouldBeSweptLazily(p)) {
2212 if (FLAG_gc_verbose) {
2213 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
2214 reinterpret_cast<intptr_t>(p));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002215 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002216 freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
Steve Blockd0582a62009-12-15 09:54:21 +00002217 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002218 p = next_page;
2219 } while (p != anchor() && freed_bytes < bytes_to_sweep);
2220
2221 if (p == anchor()) {
2222 first_unswept_page_ = Page::FromAddress(NULL);
2223 } else {
2224 first_unswept_page_ = p;
Steve Blocka7e24c12009-10-30 11:49:00 +00002225 }
2226
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002227 heap()->LowerOldGenLimits(freed_bytes);
2228
2229 heap()->FreeQueuedChunks();
2230
2231 return IsSweepingComplete();
2232}
2233
2234
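// If the current linear allocation area lies on an evacuation candidate,
// fill it with a filler object (keeping the page iterable) and drop it.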
2235void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2236 if (allocation_info_.top >= allocation_info_.limit) return;
2237
2238 if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
2239 // Create filler object to keep page iterable if it was iterable.
2240 int remaining =
2241 static_cast<int>(allocation_info_.limit - allocation_info_.top);
2242 heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2243
2244 allocation_info_.top = NULL;
2245 allocation_info_.limit = NULL;
2246 }
2247}
2248
2249
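// Slow-path allocation: fails if the old-generation allocation limit has
// been reached (unless allocation is forced), otherwise advances the lazy
// sweeper and retries the free list, and as a last resort tries to expand
// the space before returning NULL.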
2250HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2251 // Allocation in this space has failed.
2252
Steve Blocka7e24c12009-10-30 11:49:00 +00002253 // Free list allocation failed and there is no next page. Fail if we have
2254 // hit the old generation size limit that should cause a garbage
2255 // collection.
Steve Block44f0eee2011-05-26 01:26:41 +01002256 if (!heap()->always_allocate() &&
2257 heap()->OldGenerationAllocationLimitReached()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002258 return NULL;
2259 }
2260
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002261  // If there are unswept pages, advance the lazy sweeper.
2262 if (first_unswept_page_->is_valid()) {
2263 AdvanceSweeper(size_in_bytes);
2264
2265 // Retry the free list allocation.
2266 HeapObject* object = free_list_.Allocate(size_in_bytes);
2267 if (object != NULL) return object;
2268
2269 if (!IsSweepingComplete()) {
2270 AdvanceSweeper(kMaxInt);
2271
2272 // Retry the free list allocation.
2273 object = free_list_.Allocate(size_in_bytes);
2274 if (object != NULL) return object;
2275 }
2276 }
2277
Steve Blocka7e24c12009-10-30 11:49:00 +00002278 // Try to expand the space and allocate in the new next page.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002279 if (Expand()) {
2280 return free_list_.Allocate(size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002281 }
2282
2283 // Finally, fail.
2284 return NULL;
2285}
2286
2287
Steve Blocka7e24c12009-10-30 11:49:00 +00002288#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00002289void PagedSpace::ReportCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002290 Isolate* isolate = Isolate::Current();
2291 CommentStatistic* comments_statistics =
2292 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002293 ReportCodeKindStatistics();
2294 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2295 "count (average)\"):\n");
Steve Block44f0eee2011-05-26 01:26:41 +01002296 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002297 const CommentStatistic& cs = comments_statistics[i];
2298 if (cs.size > 0) {
2299 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2300 cs.size/cs.count);
2301 }
2302 }
2303 PrintF("\n");
2304}
2305
2306
2307void PagedSpace::ResetCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002308 Isolate* isolate = Isolate::Current();
2309 CommentStatistic* comments_statistics =
2310 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002311 ClearCodeKindStatistics();
Steve Block44f0eee2011-05-26 01:26:41 +01002312 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2313 comments_statistics[i].Clear();
2314 }
2315 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2316 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2317 comments_statistics[CommentStatistic::kMaxComments].count = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002318}
2319
2320
Steve Block44f0eee2011-05-26 01:26:41 +01002321// Adds a comment to the 'comments_statistics' table. Performance OK as long as
Steve Blocka7e24c12009-10-30 11:49:00 +00002322// 'kMaxComments' is small
Steve Block44f0eee2011-05-26 01:26:41 +01002323static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2324 CommentStatistic* comments_statistics =
2325 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002326 // Do not count empty comments
2327 if (delta <= 0) return;
Steve Block44f0eee2011-05-26 01:26:41 +01002328 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
Steve Blocka7e24c12009-10-30 11:49:00 +00002329 // Search for a free or matching entry in 'comments_statistics': 'cs'
2330 // points to result.
Steve Block44f0eee2011-05-26 01:26:41 +01002331 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002332 if (comments_statistics[i].comment == NULL) {
2333 cs = &comments_statistics[i];
2334 cs->comment = comment;
2335 break;
2336 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2337 cs = &comments_statistics[i];
2338 break;
2339 }
2340 }
2341 // Update entry for 'comment'
2342 cs->size += delta;
2343 cs->count += 1;
2344}
2345
2346
2347// Call for each nested comment start (start marked with '[ xxx', end marked
2348// with ']'). RelocIterator 'it' must point to a comment reloc info.
Steve Block44f0eee2011-05-26 01:26:41 +01002349static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002350 ASSERT(!it->done());
2351 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2352 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2353 if (tmp[0] != '[') {
2354 // Not a nested comment; skip
2355 return;
2356 }
2357
2358 // Search for end of nested comment or a new nested comment
2359 const char* const comment_txt =
2360 reinterpret_cast<const char*>(it->rinfo()->data());
2361 const byte* prev_pc = it->rinfo()->pc();
2362 int flat_delta = 0;
2363 it->next();
2364 while (true) {
2365 // All nested comments must be terminated properly, and therefore exit
2366 // from loop.
2367 ASSERT(!it->done());
2368 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2369 const char* const txt =
2370 reinterpret_cast<const char*>(it->rinfo()->data());
Steve Blockd0582a62009-12-15 09:54:21 +00002371 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002372 if (txt[0] == ']') break; // End of nested comment
2373 // A new comment
Steve Block44f0eee2011-05-26 01:26:41 +01002374 CollectCommentStatistics(isolate, it);
Steve Blocka7e24c12009-10-30 11:49:00 +00002375 // Skip code that was covered with previous comment
2376 prev_pc = it->rinfo()->pc();
2377 }
2378 it->next();
2379 }
Steve Block44f0eee2011-05-26 01:26:41 +01002380 EnterComment(isolate, comment_txt, flat_delta);
Steve Blocka7e24c12009-10-30 11:49:00 +00002381}
2382
2383
2384// Collects code size statistics:
2385// - by code kind
2386// - by code comment
2387void PagedSpace::CollectCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002388 Isolate* isolate = heap()->isolate();
Steve Blocka7e24c12009-10-30 11:49:00 +00002389 HeapObjectIterator obj_it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002390 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002391 if (obj->IsCode()) {
2392 Code* code = Code::cast(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01002393 isolate->code_kind_statistics()[code->kind()] += code->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00002394 RelocIterator it(code);
2395 int delta = 0;
2396 const byte* prev_pc = code->instruction_start();
2397 while (!it.done()) {
2398 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
Steve Blockd0582a62009-12-15 09:54:21 +00002399 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
Steve Block44f0eee2011-05-26 01:26:41 +01002400 CollectCommentStatistics(isolate, &it);
Steve Blocka7e24c12009-10-30 11:49:00 +00002401 prev_pc = it.rinfo()->pc();
2402 }
2403 it.next();
2404 }
2405
2406 ASSERT(code->instruction_start() <= prev_pc &&
Leon Clarkeac952652010-07-15 11:15:24 +01002407 prev_pc <= code->instruction_end());
2408 delta += static_cast<int>(code->instruction_end() - prev_pc);
Steve Block44f0eee2011-05-26 01:26:41 +01002409 EnterComment(isolate, "NoComment", delta);
Steve Blocka7e24c12009-10-30 11:49:00 +00002410 }
2411 }
2412}
2413
2414
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002415void PagedSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002416 int pct = static_cast<int>(Available() * 100 / Capacity());
2417 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2418 ", waste: %" V8_PTR_PREFIX "d"
2419 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002420 Capacity(), Waste(), Available(), pct);
2421
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002422 if (was_swept_conservatively_) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00002423 ClearHistograms();
2424 HeapObjectIterator obj_it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002425 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
Leon Clarked91b9f72010-01-27 17:25:45 +00002426 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002427 ReportHistogram(true);
2428}
Steve Blocka7e24c12009-10-30 11:49:00 +00002429#endif
2430
2431// -----------------------------------------------------------------------------
2432// FixedSpace implementation
2433
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002434void FixedSpace::PrepareForMarkCompact() {
Steve Block6ded16b2010-05-10 14:33:55 +01002435 // Call prepare of the super class.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002436 PagedSpace::PrepareForMarkCompact();
Steve Block6ded16b2010-05-10 14:33:55 +01002437
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002438 // During a non-compacting collection, everything below the linear
2439 // allocation pointer except wasted top-of-page blocks is considered
2440 // allocated and we will rediscover available bytes during the
2441 // collection.
2442 accounting_stats_.AllocateBytes(free_list_.available());
Steve Blocka7e24c12009-10-30 11:49:00 +00002443
2444 // Clear the free list before a full GC---it will be rebuilt afterward.
2445 free_list_.Reset();
2446}
2447
2448
Steve Blocka7e24c12009-10-30 11:49:00 +00002449// -----------------------------------------------------------------------------
2450// MapSpace implementation
2451
Steve Blocka7e24c12009-10-30 11:49:00 +00002452#ifdef DEBUG
2453void MapSpace::VerifyObject(HeapObject* object) {
2454 // The object should be a map or a free-list node.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002455 ASSERT(object->IsMap() || object->IsFreeSpace());
Steve Blocka7e24c12009-10-30 11:49:00 +00002456}
2457#endif
2458
2459
2460// -----------------------------------------------------------------------------
2461// GlobalPropertyCellSpace implementation
2462
2463#ifdef DEBUG
2464void CellSpace::VerifyObject(HeapObject* object) {
2465 // The object should be a global object property cell or a free-list node.
2466 ASSERT(object->IsJSGlobalPropertyCell() ||
Steve Block44f0eee2011-05-26 01:26:41 +01002467 object->map() == heap()->two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002468}
2469#endif
2470
2471
2472// -----------------------------------------------------------------------------
2473// LargeObjectIterator
2474
2475LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002476 current_ = space->first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002477 size_func_ = NULL;
2478}
2479
2480
2481LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2482 HeapObjectCallback size_func) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002483 current_ = space->first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002484 size_func_ = size_func;
2485}
2486
2487
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002488HeapObject* LargeObjectIterator::Next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002489 if (current_ == NULL) return NULL;
2490
Steve Blocka7e24c12009-10-30 11:49:00 +00002491 HeapObject* object = current_->GetObject();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002492 current_ = current_->next_page();
Steve Blocka7e24c12009-10-30 11:49:00 +00002493 return object;
2494}
2495
2496
2497// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002498// LargeObjectSpace
2499
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002500LargeObjectSpace::LargeObjectSpace(Heap* heap,
2501 intptr_t max_capacity,
2502 AllocationSpace id)
Steve Block44f0eee2011-05-26 01:26:41 +01002503 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002504 max_capacity_(max_capacity),
2505 first_page_(NULL),
Steve Blocka7e24c12009-10-30 11:49:00 +00002506 size_(0),
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002507 page_count_(0),
2508 objects_size_(0) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002509
2510
2511bool LargeObjectSpace::Setup() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002512 first_page_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00002513 size_ = 0;
2514 page_count_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002515 objects_size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002516 return true;
2517}
2518
2519
2520void LargeObjectSpace::TearDown() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002521 while (first_page_ != NULL) {
2522 LargePage* page = first_page_;
2523 first_page_ = first_page_->next_page();
2524 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2525
2526 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2527 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2528 space, kAllocationActionFree, page->size());
2529 heap()->isolate()->memory_allocator()->Free(page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002530 }
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002531 Setup();
Steve Blocka7e24c12009-10-30 11:49:00 +00002532}
2533
2534
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002535MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2536 Executability executable) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002537 // Check if we want to force a GC before growing the old space further.
2538 // If so, fail the allocation.
Steve Block44f0eee2011-05-26 01:26:41 +01002539 if (!heap()->always_allocate() &&
2540 heap()->OldGenerationAllocationLimitReached()) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002541 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002542 }
2543
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002544 if (Size() + object_size > max_capacity_) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002545 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002546 }
2547
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002548 LargePage* page = heap()->isolate()->memory_allocator()->
2549 AllocateLargePage(object_size, executable, this);
2550 if (page == NULL) return Failure::RetryAfterGC(identity());
2551 ASSERT(page->area_size() >= object_size);
2552
2553 size_ += static_cast<int>(page->size());
2554 objects_size_ += object_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002555 page_count_++;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002556 page->set_next_page(first_page_);
2557 first_page_ = page;
Steve Blocka7e24c12009-10-30 11:49:00 +00002558
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002559 HeapObject* object = page->GetObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01002560
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002561#ifdef DEBUG
2562  // Make the object consistent so the heap can be verified in OldSpaceStep.
2563 reinterpret_cast<Object**>(object->address())[0] =
2564 heap()->fixed_array_map();
2565 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2566#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002567
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002568 heap()->incremental_marking()->OldSpaceStep(object_size);
2569 return object;
Steve Blocka7e24c12009-10-30 11:49:00 +00002570}
2571
2572
2573// GC support
John Reck59135872010-11-02 12:39:01 -07002574MaybeObject* LargeObjectSpace::FindObject(Address a) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002575 for (LargePage* page = first_page_;
2576 page != NULL;
2577 page = page->next_page()) {
2578 Address page_address = page->address();
2579 if (page_address <= a && a < page_address + page->size()) {
2580 return page->GetObject();
Steve Blocka7e24c12009-10-30 11:49:00 +00002581 }
2582 }
2583 return Failure::Exception();
2584}
2585
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002586
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002587LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002588 // TODO(853): Change this implementation to only find executable
2589 // chunks and use some kind of hash-based approach to speed it up.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002590 for (LargePage* chunk = first_page_;
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002591 chunk != NULL;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002592 chunk = chunk->next_page()) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002593 Address chunk_address = chunk->address();
2594 if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
2595 return chunk;
2596 }
2597 }
2598 return NULL;
2599}
2600
2601
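// Sweeps the large object space: marked objects have their mark bit cleared
// and survive; unmarked objects have their pages unlinked and freed. Pages
// that may contain pointers (fixed arrays) are queued with the heap rather
// than freed directly.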
Steve Blocka7e24c12009-10-30 11:49:00 +00002602void LargeObjectSpace::FreeUnmarkedObjects() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002603 LargePage* previous = NULL;
2604 LargePage* current = first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002605 while (current != NULL) {
2606 HeapObject* object = current->GetObject();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002607    // Can this large page contain pointers to non-trivial objects? No other
2608 // pointer object is this big.
2609 bool is_pointer_object = object->IsFixedArray();
2610 MarkBit mark_bit = Marking::MarkBitFrom(object);
2611 if (mark_bit.Get()) {
2612 mark_bit.Clear();
2613 MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
Steve Blocka7e24c12009-10-30 11:49:00 +00002614 previous = current;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002615 current = current->next_page();
Steve Blocka7e24c12009-10-30 11:49:00 +00002616 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002617 LargePage* page = current;
Steve Blocka7e24c12009-10-30 11:49:00 +00002618 // Cut the chunk out from the chunk list.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002619 current = current->next_page();
Steve Blocka7e24c12009-10-30 11:49:00 +00002620 if (previous == NULL) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002621 first_page_ = current;
Steve Blocka7e24c12009-10-30 11:49:00 +00002622 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002623 previous->set_next_page(current);
Steve Blocka7e24c12009-10-30 11:49:00 +00002624 }
2625
2626 // Free the chunk.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002627 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2628 object, heap()->isolate());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002629 size_ -= static_cast<int>(page->size());
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002630 objects_size_ -= object->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00002631 page_count_--;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002632
2633 if (is_pointer_object) {
2634 heap()->QueueMemoryChunkForFree(page);
2635 } else {
2636 heap()->isolate()->memory_allocator()->Free(page);
2637 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002638 }
2639 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002640 heap()->FreeQueuedChunks();
Steve Blocka7e24c12009-10-30 11:49:00 +00002641}
2642
2643
2644bool LargeObjectSpace::Contains(HeapObject* object) {
2645 Address address = object->address();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002646 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
Steve Blocka7e24c12009-10-30 11:49:00 +00002647
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002648 bool owned = (chunk->owner() == this);
Steve Blocka7e24c12009-10-30 11:49:00 +00002649
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002650 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
2651
2652 return owned;
Steve Blocka7e24c12009-10-30 11:49:00 +00002653}
2654
2655
2656#ifdef DEBUG
2657// We do not assume that the large object iterator works, because it depends
2658// on the invariants we are checking during verification.
2659void LargeObjectSpace::Verify() {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002660 for (LargePage* chunk = first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002661 chunk != NULL;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002662 chunk = chunk->next_page()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002663 // Each chunk contains an object that starts at the large object page's
2664 // object area start.
2665 HeapObject* object = chunk->GetObject();
2666 Page* page = Page::FromAddress(object->address());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002667 ASSERT(object->address() == page->area_start());
Steve Blocka7e24c12009-10-30 11:49:00 +00002668
2669 // The first word should be a map, and we expect all map pointers to be
2670 // in map space.
2671 Map* map = object->map();
2672 ASSERT(map->IsMap());
Steve Block44f0eee2011-05-26 01:26:41 +01002673 ASSERT(heap()->map_space()->Contains(map));
Steve Blocka7e24c12009-10-30 11:49:00 +00002674
2675 // We have only code, sequential strings, external strings
2676 // (sequential strings that have been morphed into external
2677 // strings), fixed arrays, and byte arrays in large object space.
2678 ASSERT(object->IsCode() || object->IsSeqString() ||
2679 object->IsExternalString() || object->IsFixedArray() ||
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002680 object->IsFixedDoubleArray() || object->IsByteArray());
Steve Blocka7e24c12009-10-30 11:49:00 +00002681
2682 // The object itself should look OK.
2683 object->Verify();
2684
2685 // Byte arrays and strings don't have interior pointers.
2686 if (object->IsCode()) {
2687 VerifyPointersVisitor code_visitor;
2688 object->IterateBody(map->instance_type(),
2689 object->Size(),
2690 &code_visitor);
2691 } else if (object->IsFixedArray()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002692 FixedArray* array = FixedArray::cast(object);
2693 for (int j = 0; j < array->length(); j++) {
2694 Object* element = array->get(j);
2695 if (element->IsHeapObject()) {
2696 HeapObject* element_object = HeapObject::cast(element);
Steve Block44f0eee2011-05-26 01:26:41 +01002697 ASSERT(heap()->Contains(element_object));
Steve Blocka7e24c12009-10-30 11:49:00 +00002698 ASSERT(element_object->map()->IsMap());
Steve Blocka7e24c12009-10-30 11:49:00 +00002699 }
2700 }
2701 }
2702 }
2703}
2704
2705
2706void LargeObjectSpace::Print() {
2707 LargeObjectIterator it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002708 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002709 obj->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00002710 }
2711}
2712
2713
2714void LargeObjectSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002715 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002716 int num_objects = 0;
2717 ClearHistograms();
2718 LargeObjectIterator it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002719 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002720 num_objects++;
Leon Clarked91b9f72010-01-27 17:25:45 +00002721 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002722 }
2723
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002724 PrintF(" number of objects %d, "
2725 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002726 if (num_objects > 0) ReportHistogram(false);
2727}
2728
2729
2730void LargeObjectSpace::CollectCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002731 Isolate* isolate = heap()->isolate();
Steve Blocka7e24c12009-10-30 11:49:00 +00002732 LargeObjectIterator obj_it(this);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002733 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002734 if (obj->IsCode()) {
2735 Code* code = Code::cast(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01002736 isolate->code_kind_statistics()[code->kind()] += code->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00002737 }
2738 }
2739}
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002740
2741
2742void Page::Print() {
2743 // Make a best-effort to print the objects in the page.
2744 PrintF("Page@%p in %s\n",
2745 this->address(),
2746 AllocationSpaceName(this->owner()->identity()));
2747 printf(" --------------------------------------\n");
2748 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
2749 unsigned mark_size = 0;
2750 for (HeapObject* object = objects.Next();
2751 object != NULL;
2752 object = objects.Next()) {
2753 bool is_marked = Marking::MarkBitFrom(object).Get();
2754 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
2755 if (is_marked) {
2756 mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
2757 }
2758 object->ShortPrint();
2759 PrintF("\n");
2760 }
2761 printf(" --------------------------------------\n");
2762 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2763}
2764
Steve Blocka7e24c12009-10-30 11:49:00 +00002765#endif // DEBUG
2766
2767} } // namespace v8::internal