// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "msan.h"
#include "platform.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize the current
  // address and end as NULL, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  ASSERT(owner == page->heap()->old_pointer_space() ||
         owner == page->heap()->old_data_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->cell_space() ||
         owner == page->heap()->property_cell_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner),
             page->area_start(),
             page->area_end(),
             kOnePageOnly,
             size_func);
  ASSERT(page->WasSweptPrecisely());
}


void HeapObjectIterator::Initialize(PagedSpace* space,
                                    Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
  // Check that we actually can iterate this space.
  ASSERT(!space->was_swept_conservatively());

  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
  size_func_ = size_f;
}

// We have hit the end of the current page's object area and should advance
// to the next page.
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  ASSERT(cur_page->WasSweptPrecisely());
  return true;
}


// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {
}


bool CodeRange::SetUp(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  Address base = reinterpret_cast<Address>(code_range_->address());
  Address aligned_base =
      RoundUp(reinterpret_cast<Address>(code_range_->address()),
              MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}

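// Advances to the next allocation block that can hold at least the requested
// number of bytes. If no remaining block is large enough, the free list and
// the allocation list are merged, sorted and coalesced; if even that does not
// yield a big enough block, the process aborts via FatalProcessOutOfMemory.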
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}

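// Carves a block of at least requested_size bytes off the current allocation
// block and commits its first commit_size bytes. Returns the start address of
// the block (NULL if committing fails) and reports the number of bytes
// actually taken from the code range through the allocated out-parameter.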
Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  ASSERT(commit_size <= requested_size);
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough. This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested_size);
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  ASSERT(*allocated <= current.size);
  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
                                                            current.start,
                                                            commit_size,
                                                            *allocated)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {
}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  ASSERT(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // ASSERT(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


bool MemoryAllocator::CommitMemory(Address base,
                                   size_t size,
                                   Executability executable) {
  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(reservation->IsReserved());
  size_t size = reservation->size();
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  ASSERT(!isolate_->code_range()->contains(
      static_cast<Address>(reservation->address())));
  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base,
                                 size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
    ASSERT(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
    bool result = VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    ASSERT(result);
  }
}

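// Reserves (but does not commit) a block of size bytes whose base is aligned
// to the given alignment, hands ownership of the reservation to controller,
// and returns the aligned base address (NULL on failure).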
Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                              size_t alignment,
                                              VirtualMemory* controller) {
  VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_ += reservation.size();
  Address base = RoundUp(static_cast<Address>(reservation.address()),
                         alignment);
  controller->TakeControl(&reservation);
  return base;
}

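// Reserves an aligned block of reserve_size bytes and commits its first
// commit_size bytes. For executable memory the header, guard pages and body
// are committed separately via CommitExecutableMemory; on any failure the
// whole reservation is released and NULL is returned.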
Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
                                               size_t commit_size,
                                               size_t alignment,
                                               Executability executable,
                                               VirtualMemory* controller) {
  ASSERT(commit_size <= reserve_size);
  VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation,
                                base,
                                commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}


void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}

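// Initializes the memory chunk starting at start as a page of the given
// semispace and flags it as being in to-space or from-space accordingly.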
NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                       Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                               start,
                                               Page::kPageSize,
                                               area_start,
                                               area_end,
                                               NOT_EXECUTABLE,
                                               semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // Flags mark this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}

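// Initializes the header fields of the chunk located at base: owner, object
// area boundaries, free-list statistics, live-byte counts and flags. Freshly
// initialized chunks are marked as precisely swept.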
MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                     Address base,
                                     size_t size,
                                     Address area_start,
                                     Address area_end,
                                     Executability executable,
                                     Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  ASSERT(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_ = static_cast<int>(area_start - base);
  chunk->parallel_sweeping_ = 0;
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT_PRECISELY);

  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (owner == heap->old_data_space()) {
    chunk->SetFlag(CONTAINS_ONLY_DATA);
  }

  return chunk;
}


// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
                      MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
523 ASSERT(commit_size <= size() - 2 * guard_size);
524 // Append the committed area.
525 Address start = address() + committed_size + guard_size;
526 size_t length = commit_size - committed_size;
527 if (reservation_.IsReserved()) {
jkummerow@chromium.org2c9426b2013-09-05 16:31:13 +0000528 Executability executable = IsFlagSet(IS_EXECUTABLE)
529 ? EXECUTABLE : NOT_EXECUTABLE;
530 if (!heap()->isolate()->memory_allocator()->CommitMemory(
531 start, length, executable)) {
mstarzinger@chromium.org068ea0a2013-01-30 09:39:44 +0000532 return false;
533 }
534 } else {
535 CodeRange* code_range = heap_->isolate()->code_range();
536 ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
537 if (!code_range->CommitRawMemory(start, length)) return false;
538 }
539
540 if (Heap::ShouldZapGarbage()) {
541 heap_->isolate()->memory_allocator()->ZapBlock(start, length);
542 }
543 } else if (commit_size < committed_size) {
544 ASSERT(commit_size > 0);
545 // Shrink the committed area.
546 size_t length = committed_size - commit_size;
547 Address start = address() + committed_size + guard_size - length;
548 if (reservation_.IsReserved()) {
549 if (!reservation_.Uncommit(start, length)) return false;
550 } else {
551 CodeRange* code_range = heap_->isolate()->code_range();
552 ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
553 if (!code_range->UncommitRawMemory(start, length)) return false;
554 }
555 }
556
557 area_end_ = area_start_ + requested;
558 return true;
559}
560
561
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +0000562void MemoryChunk::InsertAfter(MemoryChunk* other) {
563 next_chunk_ = other->next_chunk_;
564 prev_chunk_ = other;
svenpanne@chromium.org2bda5432013-03-15 12:39:50 +0000565
  // This memory barrier is needed since concurrent sweeper threads may iterate
  // over the list of pages while a new page is inserted.
  // TODO(hpayer): find a cleaner way to guarantee that the page list can be
  // expanded concurrently.
  MemoryBarrier();

  // The following two write operations can take effect in arbitrary order
  // since pages are always iterated by the sweeper threads in LIFO order,
  // i.e., the inserted page becomes visible for the sweeper threads after
  // other->next_chunk_ = this;
  other->next_chunk_->prev_chunk_ = this;
  other->next_chunk_ = this;
}


void MemoryChunk::Unlink() {
  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
    heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  next_chunk_->prev_chunk_ = prev_chunk_;
  prev_chunk_->next_chunk_ = next_chunk_;
  prev_chunk_ = NULL;
  next_chunk_ = NULL;
}

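// Allocates a memory chunk whose object area spans reserve_area_size bytes,
// of which only the first commit_area_size bytes are committed up front.
// Executable chunks come from the code range (when it exists) and get guard
// pages; see the layout diagrams below.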
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  ASSERT(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         OS::CommitPageSize()) + CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_,
          StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range()->exists()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
                                                       commit_size,
                                                       &chunk_size);
      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                       MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size,
                                   commit_size,
                                   MemoryChunk::kAlignment,
                                   executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         OS::CommitPageSize());
    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
                                 commit_area_size, OS::CommitPageSize());
    base = AllocateAlignedMemory(chunk_size,
                                 commit_size,
                                 MemoryChunk::kAlignment,
                                 executable,
                                 &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                base,
                                                chunk_size,
                                                area_start,
                                                area_end,
                                                executable,
                                                owner);
  result->set_reserved_memory(&reservation);
  MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
  return result;
}


void Page::ResetFreeListStatistics() {
  non_available_small_blocks_ = 0;
  available_in_small_free_list_ = 0;
  available_in_medium_free_list_ = 0;
  available_in_large_free_list_ = 0;
  available_in_huge_free_list_ = 0;
}


Page* MemoryAllocator::AllocatePage(intptr_t size,
                                    PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

  if (chunk == NULL) return NULL;

  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk = AllocateChunk(object_size,
                                     object_size,
                                     executable,
                                     owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}

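// Releases a chunk: notifies allocation callbacks, remembers the unmapped page
// for the heap, and returns the underlying memory either to its own
// reservation or to the code range.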
void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(
      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(),
               chunk->size(),
               chunk->executable());
  }
}

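// Commits a block with the given executability, zaps it when
// Heap::ShouldZapGarbage() is true, and updates the memory_allocated counter.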
bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}

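// Overwrites the block with kZapValue, one pointer-sized word at a time;
// any trailing bytes smaller than a pointer are left untouched.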
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}

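// Invokes every registered memory allocation callback whose space and action
// filters match the given space and action.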
void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
}


bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
                                             Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  if (!vm->Commit(start,
                  CodePageGuardStartOffset(),
                  false)) {
    return false;
  }

  // Create guard page after the header.
  if (!vm->Guard(start + CodePageGuardStartOffset())) {
    return false;
  }

  // Commit page body (executable).
  if (!vm->Commit(start + CodePageAreaStartOffset(),
                  commit_size - CodePageGuardStartOffset(),
                  true)) {
    return false;
  }

  // Create guard page before the end.
  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
    return false;
  }

  UpdateAllocatedSpaceLimits(start,
                             start + CodePageAreaStartOffset() +
                             commit_size - CodePageGuardStartOffset());
  return true;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable),
      free_list_(this),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {
  if (id == CODE_SPACE) {
    area_size_ = heap->isolate()->memory_allocator()->
        CodePageAreaSize();
  } else {
    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
  }
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
      * AreaSize();
  accounting_stats_.Clear();

  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() {
  return true;
}


bool PagedSpace::HasBeenSetUp() {
  return true;
}


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on precisely swept spaces.
  ASSERT(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::CanExpand() {
  ASSERT(max_capacity_ % AreaSize() == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);

  // Are we going to exceed capacity for this space?
  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;

  return true;
}


bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  intptr_t size = AreaSize();

  if (anchor_.next_page() == &anchor_) {
    size = SizeOfFirstPage();
  }

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
      size, this, executable());
  if (p == NULL) return false;

  ASSERT(Capacity() <= max_capacity_);

  p->InsertAfter(anchor_.prev_page());

  return true;
}

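// Returns the size to request for the first page of this space. Most spaces
// start out with less than a full page; the result is capped at AreaSize().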
intptr_t PagedSpace::SizeOfFirstPage() {
  int size = 0;
  switch (identity()) {
    case OLD_POINTER_SPACE:
      size = 72 * kPointerSize * KB;
      break;
    case OLD_DATA_SPACE:
      size = 192 * KB;
      break;
    case MAP_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case CELL_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case PROPERTY_CELL_SPACE:
      size = 8 * kPointerSize * KB;
      break;
    case CODE_SPACE:
      if (heap()->isolate()->code_range()->exists()) {
        // When code range exists, code pages are allocated in a special way
        // (from the reserved code range). That part of the code is not yet
        // upgraded to handle small pages.
        size = AreaSize();
      } else {
#if V8_TARGET_ARCH_MIPS
        // TODO(plind): Investigate larger code stubs size on MIPS.
        size = 480 * KB;
#else
        size = 416 * KB;
#endif
      }
      break;
    default:
      UNREACHABLE();
  }
  return Min(size, AreaSize());
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
  sizes->huge_size_ = page->available_in_huge_free_list();
  sizes->small_size_ = page->available_in_small_free_list();
  sizes->medium_size_ = page->available_in_medium_free_list();
  sizes->large_size_ = page->available_in_large_free_list();
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}

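// Returns an empty page to the memory allocator (or queues it for later
// release), after detaching it from the unswept-page list, the free list and
// the current allocation area.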
ulan@chromium.org6e196bf2013-03-13 09:38:22 +00001130void PagedSpace::ReleasePage(Page* page, bool unlink) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001131 ASSERT(page->LiveBytes() == 0);
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001132 ASSERT(AreaSize() == page->area_size());
danno@chromium.org2c456792011-11-11 12:00:53 +00001133
ricow@chromium.org7ad65222011-12-19 12:13:11 +00001134 // Adjust list of unswept pages if the page is the head of the list.
danno@chromium.org2c456792011-11-11 12:00:53 +00001135 if (first_unswept_page_ == page) {
1136 first_unswept_page_ = page->next_page();
1137 if (first_unswept_page_ == anchor()) {
1138 first_unswept_page_ = Page::FromAddress(NULL);
1139 }
1140 }
1141
1142 if (page->WasSwept()) {
1143 intptr_t size = free_list_.EvictFreeListItems(page);
1144 accounting_stats_.AllocateBytes(size);
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001145 ASSERT_EQ(AreaSize(), static_cast<int>(size));
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001146 } else {
1147 DecreaseUnsweptFreeBytes(page);
danno@chromium.org2c456792011-11-11 12:00:53 +00001148 }
1149
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00001150 if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1151 allocation_info_.set_top(NULL);
1152 allocation_info_.set_limit(NULL);
ricow@chromium.org27bf2882011-11-17 08:34:43 +00001153 }
1154
ulan@chromium.org6e196bf2013-03-13 09:38:22 +00001155 if (unlink) {
1156 page->Unlink();
1157 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001158 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1159 heap()->isolate()->memory_allocator()->Free(page);
1160 } else {
1161 heap()->QueueMemoryChunkForFree(page);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001162 }
1163
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001164 ASSERT(Capacity() > 0);
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001165 accounting_stats_.ShrinkSpace(AreaSize());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001166}
1167
1168
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001169#ifdef DEBUG
1170void PagedSpace::Print() { }
1171#endif
1172
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001173#ifdef VERIFY_HEAP
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001174void PagedSpace::Verify(ObjectVisitor* visitor) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001175 // We can only iterate over the pages if they were swept precisely.
1176 if (was_swept_conservatively_) return;
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001177
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001178 bool allocation_pointer_found_in_space =
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00001179 (allocation_info_.top() == allocation_info_.limit());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001180 PageIterator page_iterator(this);
1181 while (page_iterator.has_next()) {
1182 Page* page = page_iterator.next();
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001183 CHECK(page->owner() == this);
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00001184 if (page == Page::FromAllocationTop(allocation_info_.top())) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001185 allocation_pointer_found_in_space = true;
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001186 }
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001187 CHECK(page->WasSweptPrecisely());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001188 HeapObjectIterator it(page, NULL);
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001189 Address end_of_previous_object = page->area_start();
1190 Address top = page->area_end();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001191 int black_size = 0;
1192 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001193 CHECK(end_of_previous_object <= object->address());
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001194
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001195 // The first word should be a map, and we expect all map pointers to
1196 // be in map space.
1197 Map* map = object->map();
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001198 CHECK(map->IsMap());
1199 CHECK(heap()->map_space()->Contains(map));
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001200
1201 // Perform space-specific object verification.
1202 VerifyObject(object);
1203
1204 // The object itself should look OK.
1205 object->Verify();
1206
1207 // All the interior pointers should be contained in the heap.
1208 int size = object->Size();
1209 object->IterateBody(map->instance_type(), size, visitor);
1210 if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1211 black_size += size;
1212 }
1213
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001214 CHECK(object->address() + size <= top);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001215 end_of_previous_object = object->address() + size;
1216 }
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001217 CHECK_LE(black_size, page->LiveBytes());
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001218 }
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001219 CHECK(allocation_pointer_found_in_space);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001220}
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001221#endif // VERIFY_HEAP
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001222
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001223// -----------------------------------------------------------------------------
1224// NewSpace implementation
1225
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001226
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001227bool NewSpace::SetUp(int reserved_semispace_capacity,
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001228 int maximum_semispace_capacity) {
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001229  // Set up new space based on the memory block reserved below. The
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001230  // reserved space is divided into two semi-spaces.
1231 // To support fast containment testing in the new space, the size of
1232 // this chunk must be a power of two and it must be aligned to its size.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001233 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001234
1235 size_t size = 2 * reserved_semispace_capacity;
1236 Address base =
1237 heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1238 size, size, &reservation_);
1239 if (base == NULL) return false;
1240
1241 chunk_base_ = base;
1242 chunk_size_ = static_cast<uintptr_t>(size);
1243 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001244
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001245 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1246 ASSERT(IsPowerOf2(maximum_semispace_capacity));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001247
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001248 // Allocate and set up the histogram arrays if necessary.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001249 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1250 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1251
1252#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1253 promoted_histogram_[name].set_name(#name);
1254 INSTANCE_TYPE_LIST(SET_NAME)
1255#undef SET_NAME
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001256
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001257 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1258 ASSERT(static_cast<intptr_t>(chunk_size_) >=
1259 2 * heap()->ReservedSemiSpaceSize());
1260 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001261
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00001262 to_space_.SetUp(chunk_base_,
1263 initial_semispace_capacity,
1264 maximum_semispace_capacity);
1265 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1266 initial_semispace_capacity,
1267 maximum_semispace_capacity);
1268 if (!to_space_.Commit()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001269 return false;
1270 }
fschneider@chromium.org7d10be52012-04-10 12:30:14 +00001271 ASSERT(!from_space_.is_committed()); // No need to use memory yet.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001272
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001273 start_ = chunk_base_;
1274 address_mask_ = ~(2 * reserved_semispace_capacity - 1);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001275 object_mask_ = address_mask_ | kHeapObjectTagMask;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001276 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001277
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001278 ResetAllocationInfo();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001279
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001280 return true;
1281}
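// --- Illustrative sketch (not V8 code) ---------------------------------------
// The "fast containment testing" mentioned above works because the reserved
// chunk is a power of two in size and aligned to its size: every address
// inside the chunk shares the chunk's high bits, so a containment check is a
// single mask-and-compare. The helper below is a hypothetical, self-contained
// illustration (names are invented; assumes <stdint.h>):
static inline bool ChunkContainsExample(uintptr_t chunk_base,
                                        uintptr_t chunk_size,  // power of two
                                        uintptr_t address) {
  uintptr_t address_mask = ~(chunk_size - 1);  // keeps only the chunk-identifying bits
  return (address & address_mask) == chunk_base;
}
// E.g. with chunk_base == 0x40000000 and chunk_size == 0x2000000 (32 MB),
// 0x41234567 is reported as inside the chunk and 0x43234567 as outside.
// ------------------------------------------------------------------------------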
1282
1283
1284void NewSpace::TearDown() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001285 if (allocated_histogram_) {
1286 DeleteArray(allocated_histogram_);
1287 allocated_histogram_ = NULL;
1288 }
1289 if (promoted_histogram_) {
1290 DeleteArray(promoted_histogram_);
1291 promoted_histogram_ = NULL;
1292 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001293
1294 start_ = NULL;
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00001295 allocation_info_.set_top(NULL);
1296 allocation_info_.set_limit(NULL);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001297
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001298 to_space_.TearDown();
1299 from_space_.TearDown();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001300
1301 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1302
1303 ASSERT(reservation_.IsReserved());
1304 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1305 NOT_EXECUTABLE);
1306 chunk_base_ = NULL;
1307 chunk_size_ = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001308}
1309
1310
1311void NewSpace::Flip() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001312 SemiSpace::Swap(&from_space_, &to_space_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001313}
1314
1315
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001316void NewSpace::Grow() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001317 // Double the semispace size but only up to maximum capacity.
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001318 ASSERT(Capacity() < MaximumCapacity());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001319 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1320 if (to_space_.GrowTo(new_capacity)) {
1321 // Only grow from space if we managed to grow to-space.
1322 if (!from_space_.GrowTo(new_capacity)) {
1323 // If we managed to grow to-space but couldn't grow from-space,
1324 // attempt to shrink to-space.
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001325 if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1326 // We are in an inconsistent state because we could not
1327 // commit/uncommit memory from new space.
1328 V8::FatalProcessOutOfMemory("Failed to grow new space.");
1329 }
1330 }
1331 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001332 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001333}
1334
1335
1336void NewSpace::Shrink() {
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001337 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001338 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001339 if (rounded_new_capacity < Capacity() &&
1340 to_space_.ShrinkTo(rounded_new_capacity)) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001341 // Only shrink from-space if we managed to shrink to-space.
1342 from_space_.Reset();
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001343 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001344      // If we managed to shrink to-space but couldn't shrink
1345      // from-space, attempt to grow to-space again.
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001346 if (!to_space_.GrowTo(from_space_.Capacity())) {
1347 // We are in an inconsistent state because we could not
1348 // commit/uncommit memory from new space.
1349 V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1350 }
1351 }
1352 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001353 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1354}
1355
1356
1357void NewSpace::UpdateAllocationInfo() {
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00001358 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1359 allocation_info_.set_top(to_space_.page_low());
1360 allocation_info_.set_limit(to_space_.page_high());
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00001361 UpdateInlineAllocationLimit(0);
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001362 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001363}
1364
1365
1366void NewSpace::ResetAllocationInfo() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001367 to_space_.Reset();
1368 UpdateAllocationInfo();
1369 pages_used_ = 0;
1370 // Clear all mark-bits in the to-space.
1371 NewSpacePageIterator it(&to_space_);
1372 while (it.has_next()) {
1373 Bitmap::Clear(it.next());
1374 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001375}
1376
1377
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00001378void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1379 if (heap()->inline_allocation_disabled()) {
1380 // Lowest limit when linear allocation was disabled.
1381 Address high = to_space_.page_high();
1382 Address new_top = allocation_info_.top() + size_in_bytes;
1383 allocation_info_.set_limit(Min(new_top, high));
1384 } else if (inline_allocation_limit_step() == 0) {
1385 // Normal limit is the end of the current page.
1386 allocation_info_.set_limit(to_space_.page_high());
1387 } else {
1388 // Lower limit during incremental marking.
1389 Address high = to_space_.page_high();
1390 Address new_top = allocation_info_.top() + size_in_bytes;
1391 Address new_limit = new_top + inline_allocation_limit_step_;
1392 allocation_info_.set_limit(Min(new_limit, high));
1393 }
1394 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1395}
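// --- Illustrative sketch (not V8 code) ---------------------------------------
// The three branches above compute one value: the linear-allocation limit is
// the end of the current to-space page, optionally clamped down so that the
// next allocation (or the next inline-allocation step) traps back into the
// runtime. A hypothetical condensed form with invented names (assumes
// <stdint.h> and <algorithm> for std::min):
static inline uintptr_t ComputeInlineLimitExample(uintptr_t top,
                                                  uintptr_t page_high,
                                                  int requested_bytes,
                                                  int step_bytes,  // 0 = no step
                                                  bool inline_alloc_disabled) {
  if (inline_alloc_disabled) return std::min(top + requested_bytes, page_high);
  if (step_bytes == 0) return page_high;
  return std::min(top + requested_bytes + step_bytes, page_high);
}
// ------------------------------------------------------------------------------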
1396
1397
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001398bool NewSpace::AddFreshPage() {
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00001399 Address top = allocation_info_.top();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001400 if (NewSpacePage::IsAtStart(top)) {
1401 // The current page is already empty. Don't try to make another.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001402
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001403 // We should only get here if someone asks to allocate more
1404 // than what can be stored in a single page.
1405 // TODO(gc): Change the limit on new-space allocation to prevent this
1406 // from happening (all such allocations should go directly to LOSpace).
1407 return false;
1408 }
1409 if (!to_space_.AdvancePage()) {
1410 // Failed to get a new page in to-space.
1411 return false;
1412 }
danno@chromium.orgc612e022011-11-10 11:38:15 +00001413
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001414 // Clear remainder of current page.
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001415 Address limit = NewSpacePage::FromLimit(top)->area_end();
danno@chromium.orgc612e022011-11-10 11:38:15 +00001416 if (heap()->gc_state() == Heap::SCAVENGE) {
1417 heap()->promotion_queue()->SetNewLimit(limit);
1418 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
1419 }
1420
1421 int remaining_in_page = static_cast<int>(limit - top);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001422 heap()->CreateFillerObjectAt(top, remaining_in_page);
1423 pages_used_++;
1424 UpdateAllocationInfo();
danno@chromium.orgc612e022011-11-10 11:38:15 +00001425
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001426 return true;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001427}
1428
1429
danno@chromium.orgc612e022011-11-10 11:38:15 +00001430MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00001431 Address old_top = allocation_info_.top();
danno@chromium.orgc612e022011-11-10 11:38:15 +00001432 Address high = to_space_.page_high();
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00001433 if (allocation_info_.limit() < high) {
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00001434    // The limit has been lowered either because linear allocation is disabled
1435    // or because incremental marking wants a chance to do a step. Set
1436    // the new limit accordingly.
1437 Address new_top = old_top + size_in_bytes;
danno@chromium.orgc612e022011-11-10 11:38:15 +00001438 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
fschneider@chromium.org7d10be52012-04-10 12:30:14 +00001439 heap()->incremental_marking()->Step(
1440 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00001441 UpdateInlineAllocationLimit(size_in_bytes);
danno@chromium.orgc612e022011-11-10 11:38:15 +00001442 top_on_previous_step_ = new_top;
1443 return AllocateRaw(size_in_bytes);
1444 } else if (AddFreshPage()) {
1445 // Switched to new page. Try allocating again.
1446 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
fschneider@chromium.org7d10be52012-04-10 12:30:14 +00001447 heap()->incremental_marking()->Step(
1448 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
danno@chromium.orgc612e022011-11-10 11:38:15 +00001449 top_on_previous_step_ = to_space_.page_low();
1450 return AllocateRaw(size_in_bytes);
1451 } else {
1452 return Failure::RetryAfterGC();
1453 }
1454}
1455
1456
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001457#ifdef VERIFY_HEAP
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001458// We do not use the SemiSpaceIterator because verification doesn't assume
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001459// that it works (it depends on the invariants we are checking).
1460void NewSpace::Verify() {
1461 // The allocation pointer should be in the space or at the very end.
1462 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1463
1464 // There should be objects packed in from the low address up to the
1465 // allocation pointer.
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001466 Address current = to_space_.first_page()->area_start();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001467 CHECK_EQ(current, to_space_.space_start());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001468
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001469 while (current != top()) {
1470 if (!NewSpacePage::IsAtEnd(current)) {
1471 // The allocation pointer should not be in the middle of an object.
1472 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1473 current < top());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001474
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001475 HeapObject* object = HeapObject::FromAddress(current);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001476
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001477 // The first word should be a map, and we expect all map pointers to
1478 // be in map space.
1479 Map* map = object->map();
1480 CHECK(map->IsMap());
1481 CHECK(heap()->map_space()->Contains(map));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001482
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001483 // The object should not be code or a map.
1484 CHECK(!object->IsMap());
1485 CHECK(!object->IsCode());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001486
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001487 // The object itself should look OK.
1488 object->Verify();
1489
1490 // All the interior pointers should be contained in the heap.
1491 VerifyPointersVisitor visitor;
1492 int size = object->Size();
1493 object->IterateBody(map->instance_type(), size, &visitor);
1494
1495 current += size;
1496 } else {
1497 // At end of page, switch to next page.
1498 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1499 // Next page should be valid.
1500 CHECK(!page->is_anchor());
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001501 current = page->area_start();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001502 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001503 }
1504
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001505 // Check semi-spaces.
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001506 CHECK_EQ(from_space_.id(), kFromSpace);
1507 CHECK_EQ(to_space_.id(), kToSpace);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001508 from_space_.Verify();
1509 to_space_.Verify();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001510}
1511#endif
1512
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001513// -----------------------------------------------------------------------------
1514// SemiSpace implementation
1515
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00001516void SemiSpace::SetUp(Address start,
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001517 int initial_capacity,
1518 int maximum_capacity) {
1519  // Creates a space in the young generation. Setting up does not allocate
1520  // memory from the OS. A SemiSpace is given a contiguous chunk of
1521 // memory of size 'capacity' when set up, and does not grow or shrink
1522 // otherwise. In the mark-compact collector, the memory region of the from
1523 // space is used as the marking stack. It requires contiguous memory
1524 // addresses.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001525 ASSERT(maximum_capacity >= Page::kPageSize);
1526 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001527 capacity_ = initial_capacity;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001528 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
verwaest@chromium.org057bd502013-11-06 12:03:29 +00001529 maximum_committed_ = 0;
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001530 committed_ = false;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001531 start_ = start;
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001532 address_mask_ = ~(maximum_capacity - 1);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001533 object_mask_ = address_mask_ | kHeapObjectTagMask;
ager@chromium.org9085a012009-05-11 19:22:57 +00001534 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001535 age_mark_ = start_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001536}
1537
1538
1539void SemiSpace::TearDown() {
1540 start_ = NULL;
1541 capacity_ = 0;
1542}
1543
1544
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001545bool SemiSpace::Commit() {
1546 ASSERT(!is_committed());
1547 int pages = capacity_ / Page::kPageSize;
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00001548 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001549 capacity_,
1550 executable())) {
kasper.lund7276f142008-07-30 08:49:36 +00001551 return false;
1552 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001553
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00001554 NewSpacePage* current = anchor();
1555 for (int i = 0; i < pages; i++) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001556 NewSpacePage* new_page =
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00001557 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1558 new_page->InsertAfter(current);
1559 current = new_page;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001560 }
1561
verwaest@chromium.org057bd502013-11-06 12:03:29 +00001562 SetCapacity(capacity_);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001563 committed_ = true;
1564 Reset();
1565 return true;
1566}
1567
1568
1569bool SemiSpace::Uncommit() {
1570 ASSERT(is_committed());
1571 Address start = start_ + maximum_capacity_ - capacity_;
1572 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1573 return false;
1574 }
1575 anchor()->set_next_page(anchor());
1576 anchor()->set_prev_page(anchor());
1577
1578 committed_ = false;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001579 return true;
1580}
1581
1582
danno@chromium.org72204d52012-10-31 10:02:10 +00001583size_t SemiSpace::CommittedPhysicalMemory() {
1584 if (!is_committed()) return 0;
1585 size_t size = 0;
1586 NewSpacePageIterator it(this);
1587 while (it.has_next()) {
1588 size += it.next()->CommittedPhysicalMemory();
1589 }
1590 return size;
1591}
1592
1593
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001594bool SemiSpace::GrowTo(int new_capacity) {
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00001595 if (!is_committed()) {
1596 if (!Commit()) return false;
1597 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001598 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001599 ASSERT(new_capacity <= maximum_capacity_);
1600 ASSERT(new_capacity > capacity_);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001601 int pages_before = capacity_ / Page::kPageSize;
1602 int pages_after = new_capacity / Page::kPageSize;
1603
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001604 size_t delta = new_capacity - capacity_;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001605
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001606 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001607 if (!heap()->isolate()->memory_allocator()->CommitBlock(
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00001608 start_ + capacity_, delta, executable())) {
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001609 return false;
1610 }
verwaest@chromium.org057bd502013-11-06 12:03:29 +00001611 SetCapacity(new_capacity);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001612 NewSpacePage* last_page = anchor()->prev_page();
1613 ASSERT(last_page != anchor());
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00001614 for (int i = pages_before; i < pages_after; i++) {
1615 Address page_address = start_ + i * Page::kPageSize;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001616 NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1617 page_address,
1618 this);
1619 new_page->InsertAfter(last_page);
1620 Bitmap::Clear(new_page);
1621    // Duplicate the flags that were set on the old page.
1622 new_page->SetFlags(last_page->GetFlags(),
1623 NewSpacePage::kCopyOnFlipFlagsMask);
1624 last_page = new_page;
1625 }
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001626 return true;
1627}
1628
1629
1630bool SemiSpace::ShrinkTo(int new_capacity) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001631 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001632 ASSERT(new_capacity >= initial_capacity_);
1633 ASSERT(new_capacity < capacity_);
rossberg@chromium.orgfab14982012-01-05 15:02:15 +00001634 if (is_committed()) {
rossberg@chromium.orgfab14982012-01-05 15:02:15 +00001635 size_t delta = capacity_ - new_capacity;
1636 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001637
rossberg@chromium.orgfab14982012-01-05 15:02:15 +00001638 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00001639 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
rossberg@chromium.orgfab14982012-01-05 15:02:15 +00001640 return false;
1641 }
1642
1643 int pages_after = new_capacity / Page::kPageSize;
1644 NewSpacePage* new_last_page =
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00001645 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
rossberg@chromium.orgfab14982012-01-05 15:02:15 +00001646 new_last_page->set_next_page(anchor());
1647 anchor()->set_prev_page(new_last_page);
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00001648 ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
rossberg@chromium.orgfab14982012-01-05 15:02:15 +00001649 }
1650
verwaest@chromium.org057bd502013-11-06 12:03:29 +00001651 SetCapacity(new_capacity);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001652
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001653 return true;
1654}
1655
1656
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001657void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1658 anchor_.set_owner(this);
1659  // Fix up back-pointers to the anchor. The address of the anchor
1660  // changes when we swap.
1661 anchor_.prev_page()->set_next_page(&anchor_);
1662 anchor_.next_page()->set_prev_page(&anchor_);
1663
1664 bool becomes_to_space = (id_ == kFromSpace);
1665 id_ = becomes_to_space ? kToSpace : kFromSpace;
1666 NewSpacePage* page = anchor_.next_page();
1667 while (page != &anchor_) {
1668 page->set_owner(this);
1669 page->SetFlags(flags, mask);
1670 if (becomes_to_space) {
1671 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1672 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1673 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1674 page->ResetLiveBytes();
1675 } else {
1676 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1677 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1678 }
1679 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1680 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1681 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1682 page = page->next_page();
1683 }
1684}
1685
1686
1687void SemiSpace::Reset() {
1688 ASSERT(anchor_.next_page() != &anchor_);
1689 current_page_ = anchor_.next_page();
1690}
1691
1692
1693void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1694 // We won't be swapping semispaces without data in them.
1695 ASSERT(from->anchor_.next_page() != &from->anchor_);
1696 ASSERT(to->anchor_.next_page() != &to->anchor_);
1697
1698 // Swap bits.
1699 SemiSpace tmp = *from;
1700 *from = *to;
1701 *to = tmp;
1702
1703 // Fixup back-pointers to the page list anchor now that its address
1704 // has changed.
1705 // Swap to/from-space bits on pages.
1706 // Copy GC flags from old active space (from-space) to new (to-space).
1707 intptr_t flags = from->current_page()->GetFlags();
1708 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1709
1710 from->FlipPages(0, 0);
1711}
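// --- Illustrative sketch (not V8 code) ---------------------------------------
// Swap() above copies the two SemiSpace objects by value, which also moves
// their embedded anchor pages to new addresses; FlipPages() then re-points the
// neighbouring pages at the anchor's new location. The same subtlety shows up
// in any by-value copy of a struct containing an intrusive circular list
// anchor. Invented illustration, assuming the list is non-empty (as the
// asserts above require):
struct ExampleAnchorList {
  struct Node { Node* prev; Node* next; } anchor;
  // Call after the object has been copied or moved to a new address; the
  // neighbours still point at the anchor's old location until this runs.
  void FixupAfterMove() {
    anchor.prev->next = &anchor;
    anchor.next->prev = &anchor;
  }
};
// ------------------------------------------------------------------------------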
1712
1713
verwaest@chromium.org057bd502013-11-06 12:03:29 +00001714void SemiSpace::SetCapacity(int new_capacity) {
1715 capacity_ = new_capacity;
1716 if (capacity_ > maximum_committed_) {
1717 maximum_committed_ = capacity_;
1718 }
1719}
1720
1721
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001722void SemiSpace::set_age_mark(Address mark) {
1723 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1724 age_mark_ = mark;
1725 // Mark all pages up to the one containing mark.
1726 NewSpacePageIterator it(space_start(), mark);
1727 while (it.has_next()) {
1728 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1729 }
1730}
1731
1732
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001733#ifdef DEBUG
1734void SemiSpace::Print() { }
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001735#endif
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001736
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001737#ifdef VERIFY_HEAP
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001738void SemiSpace::Verify() {
1739 bool is_from_space = (id_ == kFromSpace);
1740 NewSpacePage* page = anchor_.next_page();
1741 CHECK(anchor_.semi_space() == this);
1742 while (page != &anchor_) {
1743 CHECK(page->semi_space() == this);
1744 CHECK(page->InNewSpace());
1745 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1746 : MemoryChunk::IN_TO_SPACE));
1747 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1748 : MemoryChunk::IN_FROM_SPACE));
1749 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1750 if (!is_from_space) {
1751 // The pointers-from-here-are-interesting flag isn't updated dynamically
1752 // on from-space pages, so it might be out of sync with the marking state.
1753 if (page->heap()->incremental_marking()->IsMarking()) {
1754 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1755 } else {
1756 CHECK(!page->IsFlagSet(
1757 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1758 }
1759 // TODO(gc): Check that the live_bytes_count_ field matches the
1760 // black marking on the page (if we make it match in new-space).
1761 }
1762 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1763 CHECK(page->prev_page()->next_page() == page);
1764 page = page->next_page();
1765 }
1766}
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001767#endif
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001768
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001769#ifdef DEBUG
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001770void SemiSpace::AssertValidRange(Address start, Address end) {
1771  // Addresses must belong to the same semi-space.
1772 NewSpacePage* page = NewSpacePage::FromLimit(start);
1773 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1774 SemiSpace* space = page->semi_space();
1775 CHECK_EQ(space, end_page->semi_space());
1776  // Start address is before end address, either on the same page,
1777 // or end address is on a later page in the linked list of
1778 // semi-space pages.
1779 if (page == end_page) {
1780 CHECK(start <= end);
1781 } else {
1782 while (page != end_page) {
1783 page = page->next_page();
1784 CHECK_NE(page, space->anchor());
1785 }
1786 }
1787}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001788#endif
1789
1790
1791// -----------------------------------------------------------------------------
1792// SemiSpaceIterator implementation.
1793SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001794 Initialize(space->bottom(), space->top(), NULL);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001795}
1796
1797
1798SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1799 HeapObjectCallback size_func) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001800 Initialize(space->bottom(), space->top(), size_func);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001801}
1802
1803
1804SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001805 Initialize(start, space->top(), NULL);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001806}
1807
1808
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001809SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1810 Initialize(from, to, NULL);
1811}
1812
1813
1814void SemiSpaceIterator::Initialize(Address start,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001815 Address end,
1816 HeapObjectCallback size_func) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001817 SemiSpace::AssertValidRange(start, end);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001818 current_ = start;
1819 limit_ = end;
1820 size_func_ = size_func;
1821}
1822
1823
1824#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001825// heap_histograms is shared, always clear it before using it.
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00001826static void ClearHistograms(Isolate* isolate) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001827 // We reset the name each time, though it hasn't changed.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001828#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001829 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1830#undef DEF_TYPE_NAME
1831
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001832#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001833 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1834#undef CLEAR_HISTOGRAM
1835
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001836 isolate->js_spill_information()->Clear();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001837}
1838
1839
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00001840static void ClearCodeKindStatistics(int* code_kind_statistics) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001841 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00001842 code_kind_statistics[i] = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001843 }
1844}
1845
1846
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00001847static void ReportCodeKindStatistics(int* code_kind_statistics) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001848 PrintF("\n Code kind histograms: \n");
1849 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00001850 if (code_kind_statistics[i] > 0) {
1851 PrintF(" %-20s: %10d bytes\n",
1852 Code::Kind2String(static_cast<Code::Kind>(i)),
1853 code_kind_statistics[i]);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001854 }
1855 }
1856 PrintF("\n");
1857}
1858
1859
1860static int CollectHistogramInfo(HeapObject* obj) {
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00001861 Isolate* isolate = obj->GetIsolate();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001862 InstanceType type = obj->map()->instance_type();
1863 ASSERT(0 <= type && type <= LAST_TYPE);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001864 ASSERT(isolate->heap_histograms()[type].name() != NULL);
1865 isolate->heap_histograms()[type].increment_number(1);
1866 isolate->heap_histograms()[type].increment_bytes(obj->Size());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001867
1868 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001869 JSObject::cast(obj)->IncrementSpillStatistics(
1870 isolate->js_spill_information());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001871 }
1872
1873 return obj->Size();
1874}
1875
1876
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00001877static void ReportHistogram(Isolate* isolate, bool print_spill) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001878 PrintF("\n Object Histogram:\n");
1879 for (int i = 0; i <= LAST_TYPE; i++) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001880 if (isolate->heap_histograms()[i].number() > 0) {
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001881 PrintF(" %-34s%10d (%10d bytes)\n",
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001882 isolate->heap_histograms()[i].name(),
1883 isolate->heap_histograms()[i].number(),
1884 isolate->heap_histograms()[i].bytes());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001885 }
1886 }
1887 PrintF("\n");
1888
1889 // Summarize string types.
1890 int string_number = 0;
1891 int string_bytes = 0;
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001892#define INCREMENT(type, size, name, camel_name) \
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001893 string_number += isolate->heap_histograms()[type].number(); \
1894 string_bytes += isolate->heap_histograms()[type].bytes();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001895 STRING_TYPE_LIST(INCREMENT)
1896#undef INCREMENT
1897 if (string_number > 0) {
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001898 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001899 string_bytes);
1900 }
1901
1902 if (FLAG_collect_heap_spill_statistics && print_spill) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001903 isolate->js_spill_information()->Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001904 }
1905}
1906#endif // DEBUG
1907
1908
1909// Support for statistics gathering for --heap-stats and --log-gc.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001910void NewSpace::ClearHistograms() {
1911 for (int i = 0; i <= LAST_TYPE; i++) {
1912 allocated_histogram_[i].clear();
1913 promoted_histogram_[i].clear();
1914 }
1915}
1916
mstarzinger@chromium.orge0e1b0d2013-07-08 08:38:06 +00001917
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001918// Because the copying collector does not touch garbage objects, we iterate
1919// the new space before a collection to get a histogram of allocated objects.
whesse@chromium.org030d38e2011-07-13 13:23:34 +00001920// This only happens when the --log-gc flag is set.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001921void NewSpace::CollectStatistics() {
1922 ClearHistograms();
1923 SemiSpaceIterator it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001924 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001925 RecordAllocation(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001926}
1927
1928
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001929static void DoReportStatistics(Isolate* isolate,
1930 HistogramInfo* info, const char* description) {
1931 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001932 // Lump all the string types together.
1933 int string_number = 0;
1934 int string_bytes = 0;
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001935#define INCREMENT(type, size, name, camel_name) \
1936 string_number += info[type].number(); \
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001937 string_bytes += info[type].bytes();
1938 STRING_TYPE_LIST(INCREMENT)
1939#undef INCREMENT
1940 if (string_number > 0) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001941 LOG(isolate,
1942 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001943 }
1944
1945 // Then do the other types.
1946 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1947 if (info[i].number() > 0) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001948 LOG(isolate,
1949 HeapSampleItemEvent(info[i].name(), info[i].number(),
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001950 info[i].bytes()));
1951 }
1952 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001953 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001954}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001955
1956
1957void NewSpace::ReportStatistics() {
1958#ifdef DEBUG
1959 if (FLAG_heap_stats) {
1960 float pct = static_cast<float>(Available()) / Capacity();
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001961 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1962 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001963 Capacity(), Available(), static_cast<int>(pct*100));
1964 PrintF("\n Object Histogram:\n");
1965 for (int i = 0; i <= LAST_TYPE; i++) {
1966 if (allocated_histogram_[i].number() > 0) {
ager@chromium.orgce5e87b2010-03-10 10:24:18 +00001967 PrintF(" %-34s%10d (%10d bytes)\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001968 allocated_histogram_[i].name(),
1969 allocated_histogram_[i].number(),
1970 allocated_histogram_[i].bytes());
1971 }
1972 }
1973 PrintF("\n");
1974 }
1975#endif // DEBUG
1976
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001977 if (FLAG_log_gc) {
hpayer@chromium.orgc5d49712013-09-11 08:25:48 +00001978 Isolate* isolate = heap()->isolate();
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001979 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1980 DoReportStatistics(isolate, promoted_histogram_, "promoted");
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001981 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001982}
1983
1984
1985void NewSpace::RecordAllocation(HeapObject* obj) {
1986 InstanceType type = obj->map()->instance_type();
1987 ASSERT(0 <= type && type <= LAST_TYPE);
1988 allocated_histogram_[type].increment_number(1);
1989 allocated_histogram_[type].increment_bytes(obj->Size());
1990}
1991
1992
1993void NewSpace::RecordPromotion(HeapObject* obj) {
1994 InstanceType type = obj->map()->instance_type();
1995 ASSERT(0 <= type && type <= LAST_TYPE);
1996 promoted_histogram_[type].increment_number(1);
1997 promoted_histogram_[type].increment_bytes(obj->Size());
1998}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001999
danno@chromium.org72204d52012-10-31 10:02:10 +00002000
2001size_t NewSpace::CommittedPhysicalMemory() {
2002 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00002003 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
danno@chromium.org72204d52012-10-31 10:02:10 +00002004 size_t size = to_space_.CommittedPhysicalMemory();
2005 if (from_space_.is_committed()) {
2006 size += from_space_.CommittedPhysicalMemory();
2007 }
2008 return size;
2009}
2010
mstarzinger@chromium.orge0e1b0d2013-07-08 08:38:06 +00002011
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002012// -----------------------------------------------------------------------------
2013// Free lists for old object spaces implementation
2014
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002015void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002016 ASSERT(size_in_bytes > 0);
2017 ASSERT(IsAligned(size_in_bytes, kPointerSize));
2018
2019 // We write a map and possibly size information to the block. If the block
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002020 // is big enough to be a FreeSpace with at least one extra word (the next
2021 // pointer), we set its map to be the free space map and its size to an
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002022 // appropriate array length for the desired size from HeapObject::Size().
2023 // If the block is too small (eg, one or two words), to hold both a size
2024 // field and a next pointer, we give it a filler map that gives it the
2025 // correct size.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002026 if (size_in_bytes > FreeSpace::kHeaderSize) {
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00002027 set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002028 // Can't use FreeSpace::cast because it fails during deserialization.
2029 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
2030 this_as_free_space->set_size(size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002031 } else if (size_in_bytes == kPointerSize) {
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00002032 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002033 } else if (size_in_bytes == 2 * kPointerSize) {
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00002034 set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002035 } else {
2036 UNREACHABLE();
2037 }
ager@chromium.org3811b432009-10-28 14:53:37 +00002038 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002039 // deserialization because the free space map is not done yet.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002040}
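// --- Illustrative sketch (not V8 code) ---------------------------------------
// Summary of the encoding chosen in set_size() above. The thresholds follow
// the code: blocks larger than the FreeSpace header carry an explicit size and
// room for a next pointer; one- and two-word blocks get a filler map whose
// type alone implies their size. The enum and helper below are invented for
// illustration only.
enum FreeBlockEncodingExample {
  FREE_SPACE_WITH_SIZE,  // free-space map + explicit size + room for a link
  ONE_POINTER_FILLER,    // exactly one word: the map is the whole block
  TWO_POINTER_FILLER     // exactly two words: size implied by the map
};
static inline FreeBlockEncodingExample ChooseEncodingExample(int size_in_bytes,
                                                             int pointer_size,
                                                             int header_size) {
  if (size_in_bytes > header_size) return FREE_SPACE_WITH_SIZE;
  if (size_in_bytes == pointer_size) return ONE_POINTER_FILLER;
  // Any other size would be illegal here; the real code hits UNREACHABLE().
  return TWO_POINTER_FILLER;
}
// ------------------------------------------------------------------------------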
2041
2042
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002043FreeListNode* FreeListNode::next() {
ager@chromium.org3811b432009-10-28 14:53:37 +00002044 ASSERT(IsFreeListNode(this));
hpayer@chromium.org8432c912013-02-28 15:55:26 +00002045 if (map() == GetHeap()->raw_unchecked_free_space_map()) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002046 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
2047 return reinterpret_cast<FreeListNode*>(
2048 Memory::Address_at(address() + kNextOffset));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002049 } else {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002050 return reinterpret_cast<FreeListNode*>(
2051 Memory::Address_at(address() + kPointerSize));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002052 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002053}
2054
2055
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002056FreeListNode** FreeListNode::next_address() {
ager@chromium.org3811b432009-10-28 14:53:37 +00002057 ASSERT(IsFreeListNode(this));
hpayer@chromium.org8432c912013-02-28 15:55:26 +00002058 if (map() == GetHeap()->raw_unchecked_free_space_map()) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002059 ASSERT(Size() >= kNextOffset + kPointerSize);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002060 return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002061 } else {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002062 return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002063 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002064}
2065
2066
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002067void FreeListNode::set_next(FreeListNode* next) {
2068 ASSERT(IsFreeListNode(this));
2069 // While we are booting the VM the free space map will actually be null. So
2070 // we have to make sure that we don't try to use it for anything at that
2071 // stage.
hpayer@chromium.org8432c912013-02-28 15:55:26 +00002072 if (map() == GetHeap()->raw_unchecked_free_space_map()) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002073 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
2074 Memory::Address_at(address() + kNextOffset) =
2075 reinterpret_cast<Address>(next);
2076 } else {
2077 Memory::Address_at(address() + kPointerSize) =
2078 reinterpret_cast<Address>(next);
2079 }
2080}
2081
2082
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002083intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2084 intptr_t free_bytes = 0;
2085 if (category->top_ != NULL) {
2086 ASSERT(category->end_ != NULL);
2087    // This is safe (not going to deadlock) since two Concatenate operations
2088    // are never performed concurrently on the same pair of free lists in
2089    // opposite directions.
jkummerow@chromium.orgdc94e192013-08-30 11:35:42 +00002090 LockGuard<Mutex> target_lock_guard(mutex());
2091 LockGuard<Mutex> source_lock_guard(category->mutex());
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002092 free_bytes = category->available();
2093 if (end_ == NULL) {
2094 end_ = category->end();
2095 } else {
2096 category->end()->set_next(top_);
2097 }
2098 top_ = category->top();
2099 available_ += category->available();
2100 category->Reset();
2101 }
2102 return free_bytes;
2103}
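// --- Illustrative sketch (not V8 code) ---------------------------------------
// The comment above rules out deadlock by an external invariant: the same pair
// of free lists is never concatenated in both directions at once. When such an
// invariant cannot be guaranteed, a common alternative (shown here only as a
// generic sketch, not as what V8 does) is to acquire the two locks in one
// globally consistent order, for example by address (assumes <functional>):
template <typename MutexType>
static inline void LockPairInOrderExample(MutexType* a, MutexType* b) {
  if (std::less<MutexType*>()(b, a)) {  // fixed global order: lower address first
    MutexType* tmp = a;
    a = b;
    b = tmp;
  }
  a->Lock();  // assumes the mutex type exposes Lock()/Unlock()
  b->Lock();
}
// ------------------------------------------------------------------------------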
2104
2105
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002106void FreeListCategory::Reset() {
2107 top_ = NULL;
2108 end_ = NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002109 available_ = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002110}
2111
2112
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002113intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2114 int sum = 0;
2115 FreeListNode** n = &top_;
2116 while (*n != NULL) {
2117 if (Page::FromAddress((*n)->address()) == p) {
2118 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2119 sum += free_space->Size();
2120 *n = (*n)->next();
2121 } else {
2122 n = (*n)->next_address();
2123 }
2124 }
2125 if (top_ == NULL) {
2126 end_ = NULL;
2127 }
2128 available_ -= sum;
2129 return sum;
2130}
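// --- Illustrative sketch (not V8 code) ---------------------------------------
// EvictFreeListItemsInList() above uses the classic pointer-to-pointer idiom
// for deleting from a singly linked list without special-casing the head: the
// iterator is the address of the current link, so splicing a node out is a
// single store through that address. A minimal, self-contained version on an
// invented node type:
struct ExampleNode {
  int value;
  ExampleNode* next;
};
// Unlinks every node whose value equals 'doomed' and returns how many were
// removed; 'head' itself is updated through the extra level of indirection.
static inline int RemoveMatchingExample(ExampleNode** head, int doomed) {
  int removed = 0;
  for (ExampleNode** n = head; *n != NULL; ) {
    if ((*n)->value == doomed) {
      *n = (*n)->next;   // splice out; also works when *n is the head link
      removed++;
    } else {
      n = &(*n)->next;   // advance to the next link slot
    }
  }
  return removed;
}
// ------------------------------------------------------------------------------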
2131
2132
2133FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
2134 FreeListNode* node = top_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002135
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002136 if (node == NULL) return NULL;
2137
2138 while (node != NULL &&
2139 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002140 available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002141 node = node->next();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002142 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002143
2144 if (node != NULL) {
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002145 set_top(node->next());
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002146 *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002147 available_ -= *node_size;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002148 } else {
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002149 set_top(NULL);
2150 }
2151
2152 if (top() == NULL) {
2153 set_end(NULL);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002154 }
2155
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002156 return node;
2157}
2158
2159
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002160FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
2161 int *node_size) {
2162 FreeListNode* node = PickNodeFromList(node_size);
2163 if (node != NULL && *node_size < size_in_bytes) {
2164 Free(node, *node_size);
2165 *node_size = 0;
2166 return NULL;
2167 }
2168 return node;
2169}
2170
2171
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002172void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
2173 node->set_next(top_);
2174 top_ = node;
2175 if (end_ == NULL) {
2176 end_ = node;
2177 }
2178 available_ += size_in_bytes;
2179}
2180
2181
2182void FreeListCategory::RepairFreeList(Heap* heap) {
2183 FreeListNode* n = top_;
2184 while (n != NULL) {
2185 Map** map_location = reinterpret_cast<Map**>(n->address());
2186 if (*map_location == NULL) {
2187 *map_location = heap->free_space_map();
2188 } else {
2189 ASSERT(*map_location == heap->free_space_map());
2190 }
2191 n = n->next();
2192 }
2193}
2194
2195
2196FreeList::FreeList(PagedSpace* owner)
2197 : owner_(owner), heap_(owner->heap()) {
2198 Reset();
2199}
2200
2201
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002202intptr_t FreeList::Concatenate(FreeList* free_list) {
2203 intptr_t free_bytes = 0;
2204 free_bytes += small_list_.Concatenate(free_list->small_list());
2205 free_bytes += medium_list_.Concatenate(free_list->medium_list());
2206 free_bytes += large_list_.Concatenate(free_list->large_list());
2207 free_bytes += huge_list_.Concatenate(free_list->huge_list());
2208 return free_bytes;
2209}
2210
2211
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002212void FreeList::Reset() {
2213 small_list_.Reset();
2214 medium_list_.Reset();
2215 large_list_.Reset();
2216 huge_list_.Reset();
2217}
2218
2219
2220int FreeList::Free(Address start, int size_in_bytes) {
2221 if (size_in_bytes == 0) return 0;
2222
2223 FreeListNode* node = FreeListNode::FromAddress(start);
2224 node->set_size(heap_, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002225 Page* page = Page::FromAddress(start);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002226
2227 // Early return to drop too-small blocks on the floor.
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002228 if (size_in_bytes < kSmallListMin) {
2229 page->add_non_available_small_blocks(size_in_bytes);
2230 return size_in_bytes;
2231 }
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002232
2233 // Insert other blocks at the head of a free list of the appropriate
2234 // magnitude.
2235 if (size_in_bytes <= kSmallListMax) {
2236 small_list_.Free(node, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002237 page->add_available_in_small_free_list(size_in_bytes);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002238 } else if (size_in_bytes <= kMediumListMax) {
2239 medium_list_.Free(node, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002240 page->add_available_in_medium_free_list(size_in_bytes);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002241 } else if (size_in_bytes <= kLargeListMax) {
2242 large_list_.Free(node, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002243 page->add_available_in_large_free_list(size_in_bytes);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002244 } else {
2245 huge_list_.Free(node, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002246 page->add_available_in_huge_free_list(size_in_bytes);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002247 }
2248
2249 ASSERT(IsVeryLong() || available() == SumFreeLists());
2250 return 0;
2251}
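// --- Illustrative sketch (not V8 code) ---------------------------------------
// Free() above implements a segregated (size-class) free list: a block is
// routed to one of four lists purely by its size, so a later allocation only
// has to probe the lists that could possibly satisfy it. The real thresholds
// (kSmallListMin, kSmallListMax, kMediumListMax, kLargeListMax) are defined
// elsewhere in V8; the constants below are placeholders chosen only to show
// the routing shape.
enum SizeClassExample { TOO_SMALL_CLASS, SMALL_CLASS, MEDIUM_CLASS,
                        LARGE_CLASS, HUGE_CLASS };
static inline SizeClassExample ClassifySizeExample(int size_in_bytes) {
  const int kSmallMinExample = 0x20;     // placeholder value
  const int kSmallMaxExample = 0xff;     // placeholder value
  const int kMediumMaxExample = 0x7ff;   // placeholder value
  const int kLargeMaxExample = 0x3fff;   // placeholder value
  if (size_in_bytes < kSmallMinExample) return TOO_SMALL_CLASS;  // dropped on the floor
  if (size_in_bytes <= kSmallMaxExample) return SMALL_CLASS;
  if (size_in_bytes <= kMediumMaxExample) return MEDIUM_CLASS;
  if (size_in_bytes <= kLargeMaxExample) return LARGE_CLASS;
  return HUGE_CLASS;
}
// ------------------------------------------------------------------------------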
2252
2253
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002254FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2255 FreeListNode* node = NULL;
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002256 Page* page = NULL;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002257
2258 if (size_in_bytes <= kSmallAllocationMax) {
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002259 node = small_list_.PickNodeFromList(node_size);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002260 if (node != NULL) {
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002261 ASSERT(size_in_bytes <= *node_size);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002262 page = Page::FromAddress(node->address());
2263 page->add_available_in_small_free_list(-(*node_size));
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002264 ASSERT(IsVeryLong() || available() == SumFreeLists());
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002265 return node;
2266 }
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002267 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002268
2269 if (size_in_bytes <= kMediumAllocationMax) {
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002270 node = medium_list_.PickNodeFromList(node_size);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002271 if (node != NULL) {
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002272 ASSERT(size_in_bytes <= *node_size);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002273 page = Page::FromAddress(node->address());
2274 page->add_available_in_medium_free_list(-(*node_size));
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002275 ASSERT(IsVeryLong() || available() == SumFreeLists());
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002276 return node;
2277 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002278 }
2279
2280 if (size_in_bytes <= kLargeAllocationMax) {
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002281 node = large_list_.PickNodeFromList(node_size);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002282 if (node != NULL) {
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002283 ASSERT(size_in_bytes <= *node_size);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002284 page = Page::FromAddress(node->address());
2285 page->add_available_in_large_free_list(-(*node_size));
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002286 ASSERT(IsVeryLong() || available() == SumFreeLists());
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002287 return node;
2288 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002289 }
2290
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002291 int huge_list_available = huge_list_.available();
2292 for (FreeListNode** cur = huge_list_.GetTopAddress();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002293 *cur != NULL;
2294 cur = (*cur)->next_address()) {
2295 FreeListNode* cur_node = *cur;
2296 while (cur_node != NULL &&
2297 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002298 int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
2299 huge_list_available -= size;
2300 page = Page::FromAddress(cur_node->address());
2301 page->add_available_in_huge_free_list(-size);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002302 cur_node = cur_node->next();
2303 }
2304
2305 *cur = cur_node;
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002306 if (cur_node == NULL) {
2307 huge_list_.set_end(NULL);
2308 break;
2309 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002310
hpayer@chromium.org8432c912013-02-28 15:55:26 +00002311 ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002312 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
2313 int size = cur_as_free_space->Size();
2314 if (size >= size_in_bytes) {
2315 // Large enough node found. Unlink it from the list.
2316 node = *cur;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002317 *cur = node->next();
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002318 *node_size = size;
2319 huge_list_available -= size;
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002320 page = Page::FromAddress(node->address());
2321 page->add_available_in_huge_free_list(-size);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002322 break;
2323 }
2324 }
2325
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002326 if (huge_list_.top() == NULL) {
2327 huge_list_.set_end(NULL);
2328 }
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002329 huge_list_.set_available(huge_list_available);
danno@chromium.orgfe578672013-06-15 14:38:35 +00002330
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002331 if (node != NULL) {
2332 ASSERT(IsVeryLong() || available() == SumFreeLists());
2333 return node;
2334 }
2335
2336 if (size_in_bytes <= kSmallListMax) {
2337 node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2338 if (node != NULL) {
2339 ASSERT(size_in_bytes <= *node_size);
2340 page = Page::FromAddress(node->address());
2341 page->add_available_in_small_free_list(-(*node_size));
2342 }
2343 } else if (size_in_bytes <= kMediumListMax) {
2344 node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2345 if (node != NULL) {
2346 ASSERT(size_in_bytes <= *node_size);
2347 page = Page::FromAddress(node->address());
2348 page->add_available_in_medium_free_list(-(*node_size));
2349 }
2350 } else if (size_in_bytes <= kLargeListMax) {
2351 node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2352 if (node != NULL) {
2353 ASSERT(size_in_bytes <= *node_size);
2354 page = Page::FromAddress(node->address());
2355 page->add_available_in_large_free_list(-(*node_size));
2356 }
2357 }
2358
2359 ASSERT(IsVeryLong() || available() == SumFreeLists());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002360 return node;
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002361}
2362
2363
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002364// Allocation on the old space free list. If it succeeds then a new linear
2365// allocation space has been set up with the top and limit of the space. If
2366// the allocation fails then NULL is returned, and the caller can perform a GC
2367// or allocate a new page before retrying.
2368HeapObject* FreeList::Allocate(int size_in_bytes) {
2369 ASSERT(0 < size_in_bytes);
2370 ASSERT(size_in_bytes <= kMaxBlockSize);
2371 ASSERT(IsAligned(size_in_bytes, kPointerSize));
2372 // Don't free list allocate if there is linear space available.
2373 // Don't allocate from the free list if there is linear space available.
2374
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002375 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2376 // Mark the old linear allocation area with a free space map so it can be
2377 // skipped when scanning the heap. This also puts it back in the free list
2378 // if it is big enough.
2379 owner_->Free(owner_->top(), old_linear_size);
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00002380
danno@chromium.orgeb831462012-08-24 11:57:08 +00002381 owner_->heap()->incremental_marking()->OldSpaceStep(
2382 size_in_bytes - old_linear_size);
2383
ulan@chromium.org57ff8812013-05-10 08:16:55 +00002384 int new_node_size = 0;
2385 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2386 if (new_node == NULL) {
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00002387 owner_->SetTopAndLimit(NULL, NULL);
ulan@chromium.org57ff8812013-05-10 08:16:55 +00002388 return NULL;
2389 }
2390
2391 int bytes_left = new_node_size - size_in_bytes;
2392 ASSERT(bytes_left >= 0);
2393
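  // In debug builds, zap the allocated area with kCodeZapValue smis so that
  // reads of stale or uninitialized memory are easier to recognize.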
yangguo@chromium.org46839fb2012-08-28 09:06:19 +00002394#ifdef DEBUG
2395 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2396 reinterpret_cast<Object**>(new_node->address())[i] =
2397 Smi::FromInt(kCodeZapValue);
2398 }
2399#endif
2400
ricow@chromium.orgfa52deb2011-10-11 19:09:42 +00002401 // The old-space-step might have finished sweeping and restarted marking.
2402 // Verify that it did not turn the page of the new node into an evacuation
2403 // candidate.
2404 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2405
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002406 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2407
2408 // Memory in the linear allocation area is counted as allocated. We may free
2409 // a little of this again immediately - see below.
2410 owner_->Allocate(new_node_size);
2411
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00002412 if (owner_->heap()->inline_allocation_disabled()) {
2413 // Keep the linear allocation area empty if requested to do so; just
2414 // return the area to the free list instead.
2415 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2416 ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
2417 } else if (bytes_left > kThreshold &&
2418 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2419 FLAG_incremental_marking_steps) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002420 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2421 // We don't want to give too large linear areas to the allocator while
2422 // incremental marking is going on, because we won't check again whether
2423 // we want to do another increment until the linear area is used up.
2424 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2425 new_node_size - size_in_bytes - linear_size);
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00002426 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2427 new_node->address() + size_in_bytes + linear_size);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002428 } else if (bytes_left > 0) {
2429 // Normally we give the rest of the node to the allocator as its new
2430 // linear allocation area.
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00002431 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2432 new_node->address() + new_node_size);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002433 } else {
2434 // TODO(gc) Try not to free the linear allocation region when bytes_left
2435 // is zero.
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00002436 owner_->SetTopAndLimit(NULL, NULL);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002437 }
2438
2439 return new_node;
2440}
2441
2442
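// Removes all free list entries that live on page p and returns the number of
// bytes evicted. The small, medium and large lists are only scanned if the
// huge list alone did not account for the whole usable area of the page.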
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002443intptr_t FreeList::EvictFreeListItems(Page* p) {
2444 intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002445 p->set_available_in_huge_free_list(0);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002446
2447 if (sum < p->area_size()) {
2448 sum += small_list_.EvictFreeListItemsInList(p) +
2449 medium_list_.EvictFreeListItemsInList(p) +
2450 large_list_.EvictFreeListItemsInList(p);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002451 p->set_available_in_small_free_list(0);
2452 p->set_available_in_medium_free_list(0);
2453 p->set_available_in_large_free_list(0);
danno@chromium.org2c456792011-11-11 12:00:53 +00002454 }
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002455
danno@chromium.org2c456792011-11-11 12:00:53 +00002456 return sum;
2457}
2458
2459
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002460void FreeList::RepairLists(Heap* heap) {
2461 small_list_.RepairFreeList(heap);
2462 medium_list_.RepairFreeList(heap);
2463 large_list_.RepairFreeList(heap);
2464 huge_list_.RepairFreeList(heap);
danno@chromium.org2c456792011-11-11 12:00:53 +00002465}
2466
2467
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002468#ifdef DEBUG
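// Sums the sizes of all free space nodes in this category. Linear in the
// length of the list, so it is only used for debug checks.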
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002469intptr_t FreeListCategory::SumFreeList() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002470 intptr_t sum = 0;
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002471 FreeListNode* cur = top_;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002472 while (cur != NULL) {
hpayer@chromium.org8432c912013-02-28 15:55:26 +00002473 ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002474 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2475 sum += cur_as_free_space->Size();
2476 cur = cur->next();
2477 }
2478 return sum;
2479}
2480
2481
2482static const int kVeryLongFreeList = 500;
2483
2484
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002485int FreeListCategory::FreeListLength() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002486 int length = 0;
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002487 FreeListNode* cur = top_;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002488 while (cur != NULL) {
2489 length++;
2490 cur = cur->next();
2491 if (length == kVeryLongFreeList) return length;
2492 }
2493 return length;
2494}
2495
2496
2497bool FreeList::IsVeryLong() {
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002498 if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2499 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2500 if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2501 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002502 return false;
2503}
2504
2505
2506// This can take a very long time because it is linear in the number of entries
2507// on the free list, so it should not be called if FreeListLength returns
2508// kVeryLongFreeList.
2509intptr_t FreeList::SumFreeLists() {
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002510 intptr_t sum = small_list_.SumFreeList();
2511 sum += medium_list_.SumFreeList();
2512 sum += large_list_.SumFreeList();
2513 sum += huge_list_.SumFreeList();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002514 return sum;
2515}
2516#endif
2517
2518
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002519// -----------------------------------------------------------------------------
2520// OldSpace implementation
2521
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002522void PagedSpace::PrepareForMarkCompact() {
2523 // We don't have a linear allocation area while sweeping. It will be restored
2524 // on the first allocation after the sweep.
machenbach@chromium.orgb5be0a92013-11-15 10:32:41 +00002525 EmptyAllocationInfo();
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002526
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002527 // Stop lazy sweeping and clear marking bits for unswept pages.
2528 if (first_unswept_page_ != NULL) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002529 Page* p = first_unswept_page_;
2530 do {
2531 // Do not use ShouldBeSweptLazily predicate here.
2532 // New evacuation candidates were selected but they still have
2533 // to be swept before collection starts.
2534 if (!p->WasSwept()) {
2535 Bitmap::Clear(p);
2536 if (FLAG_gc_verbose) {
2537 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
2538 reinterpret_cast<intptr_t>(p));
2539 }
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002540 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002541 p = p->next_page();
danno@chromium.org2c456792011-11-11 12:00:53 +00002542 } while (p != anchor());
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002543 }
danno@chromium.org2c456792011-11-11 12:00:53 +00002544 first_unswept_page_ = Page::FromAddress(NULL);
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00002545 unswept_free_bytes_ = 0;
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002546
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002547 // Clear the free list before a full GC; it will be rebuilt afterward.
2548 free_list_.Reset();
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002549}
2550
2551
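// Returns the number of bytes currently occupied by objects: the total size
// of the space minus the free bytes on unswept pages and minus the unused
// part of the current linear allocation area.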
ulan@chromium.org750145a2013-03-07 15:14:13 +00002552intptr_t PagedSpace::SizeOfObjects() {
2553 ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
2554 return Size() - unswept_free_bytes_ - (limit() - top());
2555}
2556
2557
ulan@chromium.org56c14af2012-09-20 12:51:09 +00002558 // After we have booted, we have created a map that represents free space
2559 // on the heap. If a free list already existed, its elements were created
2560 // with the wrong FreeSpaceMap (normally NULL), so we need to
2561 // fix them.
2562void PagedSpace::RepairFreeListsAfterBoot() {
2563 free_list_.RepairLists(heap());
2564}
2565
2566
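// Advances lazy sweeping: sweeps pages starting at first_unswept_page_ until
// at least bytes_to_sweep bytes have been freed or every page has been
// visited, then returns whether lazy sweeping is now complete.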
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002567bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
mmassi@chromium.org2f0efde2013-02-06 14:12:58 +00002568 if (IsLazySweepingComplete()) return true;
kasper.lund7276f142008-07-30 08:49:36 +00002569
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002570 intptr_t freed_bytes = 0;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002571 Page* p = first_unswept_page_;
2572 do {
2573 Page* next_page = p->next_page();
2574 if (ShouldBeSweptLazily(p)) {
2575 if (FLAG_gc_verbose) {
2576 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
2577 reinterpret_cast<intptr_t>(p));
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002578 }
ulan@chromium.org2efb9002012-01-19 15:36:35 +00002579 DecreaseUnsweptFreeBytes(p);
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002580 freed_bytes +=
2581 MarkCompactCollector::
2582 SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
2583 this, NULL, p);
ager@chromium.org3811b432009-10-28 14:53:37 +00002584 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002585 p = next_page;
danno@chromium.org2c456792011-11-11 12:00:53 +00002586 } while (p != anchor() && freed_bytes < bytes_to_sweep);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002587
danno@chromium.org2c456792011-11-11 12:00:53 +00002588 if (p == anchor()) {
2589 first_unswept_page_ = Page::FromAddress(NULL);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002590 } else {
2591 first_unswept_page_ = p;
kasper.lund7276f142008-07-30 08:49:36 +00002592 }
2593
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002594 heap()->FreeQueuedChunks();
2595
mmassi@chromium.org2f0efde2013-02-06 14:12:58 +00002596 return IsLazySweepingComplete();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002597}
2598
2599
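// If the current linear allocation area lies on an evacuation candidate page,
// retire it: fill the unused part with a filler object so the page stays
// iterable, then clear the allocation top and limit.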
2600void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00002601 if (allocation_info_.top() >= allocation_info_.limit()) return;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002602
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00002603 if (Page::FromAllocationTop(allocation_info_.top())->
2604 IsEvacuationCandidate()) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002605 // Create filler object to keep page iterable if it was iterable.
2606 int remaining =
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00002607 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2608 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002609
jkummerow@chromium.orgef33a542013-10-28 18:03:37 +00002610 allocation_info_.set_top(NULL);
2611 allocation_info_.set_limit(NULL);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002612 }
2613}
2614
2615
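// Makes sure sweeping has progressed enough to satisfy an allocation of
// size_in_bytes. With active sweeper threads this tries to steal already
// swept memory from them and may wait for concurrent sweeping to complete;
// without them it advances the lazy sweeper directly.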
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002616bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
2617 MarkCompactCollector* collector = heap()->mark_compact_collector();
2618 if (collector->AreSweeperThreadsActivated()) {
ulan@chromium.org750145a2013-03-07 15:14:13 +00002619 if (collector->IsConcurrentSweepingInProgress()) {
mmassi@chromium.org2f0efde2013-02-06 14:12:58 +00002620 if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
ulan@chromium.org750145a2013-03-07 15:14:13 +00002621 if (!collector->sequential_sweeping()) {
2622 collector->WaitUntilSweepingCompleted();
ulan@chromium.org750145a2013-03-07 15:14:13 +00002623 return true;
2624 }
mmassi@chromium.org2f0efde2013-02-06 14:12:58 +00002625 }
2626 return false;
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002627 }
mmassi@chromium.org2f0efde2013-02-06 14:12:58 +00002628 return true;
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002629 } else {
2630 return AdvanceSweeper(size_in_bytes);
2631 }
2632}
2633
2634
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002635HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2636 // Allocation in this space has failed.
2637
svenpanne@chromium.org83130cf2012-11-30 10:13:25 +00002638 // If there are unswept pages, advance the lazy sweeper a bounded number of
2639 // times until we find a contiguous piece of memory of size_in_bytes.
2640 const int kMaxSweepingTries = 5;
2641 bool sweeping_complete = false;
2642
2643 for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002644 sweeping_complete = EnsureSweeperProgress(size_in_bytes);
svenpanne@chromium.orgecb9dd62011-12-01 08:22:35 +00002645
2646 // Retry the free list allocation.
2647 HeapObject* object = free_list_.Allocate(size_in_bytes);
2648 if (object != NULL) return object;
2649 }
2650
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002651 // Free list allocation failed and there is no next page. Fail if we have
2652 // hit the old generation size limit that should cause a garbage
2653 // collection.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002654 if (!heap()->always_allocate() &&
2655 heap()->OldGenerationAllocationLimitReached()) {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002656 return NULL;
2657 }
2658
svenpanne@chromium.orgecb9dd62011-12-01 08:22:35 +00002659 // Try to expand the space and allocate in the new next page.
2660 if (Expand()) {
jkummerow@chromium.org25b0e212013-10-04 15:38:52 +00002661 ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
svenpanne@chromium.orgecb9dd62011-12-01 08:22:35 +00002662 return free_list_.Allocate(size_in_bytes);
2663 }
2664
2665 // As a last resort, sweep all the remaining pages to try to find space.
2666 // This may cause a pause.
mmassi@chromium.org2f0efde2013-02-06 14:12:58 +00002667 if (!IsLazySweepingComplete()) {
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002668 EnsureSweeperProgress(kMaxInt);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002669
2670 // Retry the free list allocation.
2671 HeapObject* object = free_list_.Allocate(size_in_bytes);
2672 if (object != NULL) return object;
kasper.lund7276f142008-07-30 08:49:36 +00002673 }
2674
2675 // Finally, fail.
2676 return NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002677}
2678
2679
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002680#ifdef DEBUG
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00002681void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002682 CommentStatistic* comments_statistics =
2683 isolate->paged_space_comments_statistics();
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002684 ReportCodeKindStatistics(isolate->code_kind_statistics());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002685 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2686 "count (average)\"):\n");
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002687 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002688 const CommentStatistic& cs = comments_statistics[i];
2689 if (cs.size > 0) {
2690 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2691 cs.size/cs.count);
2692 }
2693 }
2694 PrintF("\n");
2695}
2696
2697
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00002698void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002699 CommentStatistic* comments_statistics =
2700 isolate->paged_space_comments_statistics();
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002701 ClearCodeKindStatistics(isolate->code_kind_statistics());
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002702 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2703 comments_statistics[i].Clear();
2704 }
2705 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2706 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2707 comments_statistics[CommentStatistic::kMaxComments].count = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002708}
2709
2710
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002711 // Adds a comment to the 'comments_statistics' table. Performance is OK as
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002712 // long as 'kMaxComments' is small.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002713static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2714 CommentStatistic* comments_statistics =
2715 isolate->paged_space_comments_statistics();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002716 // Do not count empty comments
2717 if (delta <= 0) return;
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002718 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002719 // Search for a free or matching entry in 'comments_statistics': 'cs'
2720 // points to the result.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002721 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002722 if (comments_statistics[i].comment == NULL) {
2723 cs = &comments_statistics[i];
2724 cs->comment = comment;
2725 break;
2726 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2727 cs = &comments_statistics[i];
2728 break;
2729 }
2730 }
2731 // Update entry for 'comment'
2732 cs->size += delta;
2733 cs->count += 1;
2734}
2735
2736
2737// Called for each nested comment start (start marked with '[ xxx', end
2738// marked with ']'). RelocIterator 'it' must point to a comment reloc info.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002739static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002740 ASSERT(!it->done());
ager@chromium.org236ad962008-09-25 09:45:57 +00002741 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002742 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2743 if (tmp[0] != '[') {
2744 // Not a nested comment; skip
2745 return;
2746 }
2747
2748 // Search for end of nested comment or a new nested comment
2749 const char* const comment_txt =
2750 reinterpret_cast<const char*>(it->rinfo()->data());
2751 const byte* prev_pc = it->rinfo()->pc();
2752 int flat_delta = 0;
2753 it->next();
2754 while (true) {
2755 // All nested comments must be terminated properly, and therefore the
2756 // loop will exit.
2757 ASSERT(!it->done());
ager@chromium.org236ad962008-09-25 09:45:57 +00002758 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002759 const char* const txt =
2760 reinterpret_cast<const char*>(it->rinfo()->data());
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002761 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002762 if (txt[0] == ']') break; // End of nested comment
2763 // A new comment
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002764 CollectCommentStatistics(isolate, it);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002765 // Skip code that was covered by the previous comment.
2766 prev_pc = it->rinfo()->pc();
2767 }
2768 it->next();
2769 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002770 EnterComment(isolate, comment_txt, flat_delta);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002771}
2772
2773
2774// Collects code size statistics:
2775// - by code kind
2776// - by code comment
2777void PagedSpace::CollectCodeStatistics() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002778 Isolate* isolate = heap()->isolate();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002779 HeapObjectIterator obj_it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002780 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002781 if (obj->IsCode()) {
2782 Code* code = Code::cast(obj);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002783 isolate->code_kind_statistics()[code->kind()] += code->Size();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002784 RelocIterator it(code);
2785 int delta = 0;
2786 const byte* prev_pc = code->instruction_start();
2787 while (!it.done()) {
ager@chromium.org236ad962008-09-25 09:45:57 +00002788 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002789 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002790 CollectCommentStatistics(isolate, &it);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002791 prev_pc = it.rinfo()->pc();
2792 }
2793 it.next();
2794 }
2795
2796 ASSERT(code->instruction_start() <= prev_pc &&
erik.corry@gmail.com4a2e25e2010-07-07 12:22:46 +00002797 prev_pc <= code->instruction_end());
2798 delta += static_cast<int>(code->instruction_end() - prev_pc);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002799 EnterComment(isolate, "NoComment", delta);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002800 }
2801 }
2802}
2803
2804
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002805void PagedSpace::ReportStatistics() {
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002806 int pct = static_cast<int>(Available() * 100 / Capacity());
2807 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2808 ", waste: %" V8_PTR_PREFIX "d"
2809 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002810 Capacity(), Waste(), Available(), pct);
2811
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002812 if (was_swept_conservatively_) return;
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00002813 ClearHistograms(heap()->isolate());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002814 HeapObjectIterator obj_it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002815 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002816 CollectHistogramInfo(obj);
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00002817 ReportHistogram(heap()->isolate(), true);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002818}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002819#endif
2820
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002821
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002822// -----------------------------------------------------------------------------
2823// MapSpace implementation
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00002824// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2825// there is at least one non-inlined virtual function. I would prefer to hide
2826// the VerifyObject definition behind VERIFY_HEAP.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002827
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002828void MapSpace::VerifyObject(HeapObject* object) {
verwaest@chromium.orgec6855e2013-08-22 12:26:58 +00002829 CHECK(object->IsMap());
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002830}
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002831
2832
2833// -----------------------------------------------------------------------------
danno@chromium.org41728482013-06-12 22:31:22 +00002834// CellSpace and PropertyCellSpace implementation
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00002835// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2836// there is at least one non-inlined virtual function. I would prefer to hide
2837// the VerifyObject definition behind VERIFY_HEAP.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002838
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002839void CellSpace::VerifyObject(HeapObject* object) {
verwaest@chromium.orgec6855e2013-08-22 12:26:58 +00002840 CHECK(object->IsCell());
danno@chromium.org41728482013-06-12 22:31:22 +00002841}
2842
2843
2844void PropertyCellSpace::VerifyObject(HeapObject* object) {
verwaest@chromium.orgec6855e2013-08-22 12:26:58 +00002845 CHECK(object->IsPropertyCell());
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002846}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002847
2848
2849// -----------------------------------------------------------------------------
2850// LargeObjectIterator
2851
2852LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002853 current_ = space->first_page_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002854 size_func_ = NULL;
2855}
2856
2857
2858LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2859 HeapObjectCallback size_func) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002860 current_ = space->first_page_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002861 size_func_ = size_func;
2862}
2863
2864
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002865HeapObject* LargeObjectIterator::Next() {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002866 if (current_ == NULL) return NULL;
2867
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002868 HeapObject* object = current_->GetObject();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002869 current_ = current_->next_page();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002870 return object;
2871}
2872
2873
2874// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002875// LargeObjectSpace
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002876static bool ComparePointers(void* key1, void* key2) {
2877 return key1 == key2;
2878}
2879
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002880
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002881LargeObjectSpace::LargeObjectSpace(Heap* heap,
2882 intptr_t max_capacity,
2883 AllocationSpace id)
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002884 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002885 max_capacity_(max_capacity),
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002886 first_page_(NULL),
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002887 size_(0),
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002888 page_count_(0),
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002889 objects_size_(0),
2890 chunk_map_(ComparePointers, 1024) {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002891
2892
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00002893bool LargeObjectSpace::SetUp() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002894 first_page_ = NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002895 size_ = 0;
verwaest@chromium.org057bd502013-11-06 12:03:29 +00002896 maximum_committed_ = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002897 page_count_ = 0;
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002898 objects_size_ = 0;
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002899 chunk_map_.Clear();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002900 return true;
2901}
2902
2903
2904void LargeObjectSpace::TearDown() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002905 while (first_page_ != NULL) {
2906 LargePage* page = first_page_;
2907 first_page_ = first_page_->next_page();
2908 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2909
2910 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2911 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2912 space, kAllocationActionFree, page->size());
2913 heap()->isolate()->memory_allocator()->Free(page);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002914 }
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00002915 SetUp();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002916}
2917
2918
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002919MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2920 Executability executable) {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002921 // Check if we want to force a GC before growing the old space further.
2922 // If so, fail the allocation.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002923 if (!heap()->always_allocate() &&
2924 heap()->OldGenerationAllocationLimitReached()) {
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00002925 return Failure::RetryAfterGC(identity());
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002926 }
2927
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002928 if (Size() + object_size > max_capacity_) {
2929 return Failure::RetryAfterGC(identity());
2930 }
2931
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002932 LargePage* page = heap()->isolate()->memory_allocator()->
fschneider@chromium.org7d10be52012-04-10 12:30:14 +00002933 AllocateLargePage(object_size, this, executable);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002934 if (page == NULL) return Failure::RetryAfterGC(identity());
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00002935 ASSERT(page->area_size() >= object_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002936
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002937 size_ += static_cast<int>(page->size());
2938 objects_size_ += object_size;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002939 page_count_++;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002940 page->set_next_page(first_page_);
2941 first_page_ = page;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002942
verwaest@chromium.org057bd502013-11-06 12:03:29 +00002943 if (size_ > maximum_committed_) {
2944 maximum_committed_ = size_;
2945 }
2946
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002947 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2948 // this large page in the chunk map.
2949 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2950 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2951 for (uintptr_t key = base; key <= limit; key++) {
2952 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2953 static_cast<uint32_t>(key),
2954 true);
2955 ASSERT(entry != NULL);
2956 entry->value = page;
2957 }
2958
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00002959 HeapObject* object = page->GetObject();
2960
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00002961 if (Heap::ShouldZapGarbage()) {
2962 // Make the object consistent so the heap can be verified in OldSpaceStep.
2963 // We only need to do this in debug builds or if verify_heap is on.
2964 reinterpret_cast<Object**>(object->address())[0] =
2965 heap()->fixed_array_map();
2966 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2967 }
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00002968
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002969 heap()->incremental_marking()->OldSpaceStep(object_size);
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00002970 return object;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002971}
2972
2973
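// Returns the physical memory actually committed for all large pages, or
// simply CommittedMemory() on platforms without lazy commits.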
danno@chromium.org72204d52012-10-31 10:02:10 +00002974size_t LargeObjectSpace::CommittedPhysicalMemory() {
2975 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
2976 size_t size = 0;
2977 LargePage* current = first_page_;
2978 while (current != NULL) {
2979 size += current->CommittedPhysicalMemory();
2980 current = current->next_page();
2981 }
2982 return size;
2983}
2984
2985
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002986// GC support
lrn@chromium.org303ada72010-10-27 09:33:13 +00002987MaybeObject* LargeObjectSpace::FindObject(Address a) {
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002988 LargePage* page = FindPage(a);
2989 if (page != NULL) {
2990 return page->GetObject();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002991 }
2992 return Failure::Exception();
2993}
2994
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002995
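// Finds the large page that contains the address a by looking up its
// MemoryChunk::kAlignment-aligned key in chunk_map_. Returns NULL if the
// address does not belong to this space.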
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002996LargePage* LargeObjectSpace::FindPage(Address a) {
2997 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2998 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2999 static_cast<uint32_t>(key),
3000 false);
3001 if (e != NULL) {
3002 ASSERT(e->value != NULL);
3003 LargePage* page = reinterpret_cast<LargePage*>(e->value);
3004 ASSERT(page->is_valid());
3005 if (page->Contains(a)) {
3006 return page;
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00003007 }
3008 }
3009 return NULL;
3010}
3011
3012
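// Sweeps the large object space: marked objects have their mark bits cleared
// and survive; unmarked objects are unlinked from the page list, their
// chunk_map_ entries are removed, and their pages are released (queued for
// later freeing if the object may contain pointers).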
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003013void LargeObjectSpace::FreeUnmarkedObjects() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003014 LargePage* previous = NULL;
3015 LargePage* current = first_page_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003016 while (current != NULL) {
3017 HeapObject* object = current->GetObject();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003018 // Can this large page contain pointers to non-trivial objects? No other
3019 // pointer object is this big.
3020 bool is_pointer_object = object->IsFixedArray();
3021 MarkBit mark_bit = Marking::MarkBitFrom(object);
3022 if (mark_bit.Get()) {
3023 mark_bit.Clear();
yangguo@chromium.orgfb377212012-11-16 14:43:43 +00003024 Page::FromAddress(object->address())->ResetProgressBar();
3025 Page::FromAddress(object->address())->ResetLiveBytes();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003026 previous = current;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003027 current = current->next_page();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003028 } else {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003029 LargePage* page = current;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003030 // Cut the chunk out from the chunk list.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003031 current = current->next_page();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003032 if (previous == NULL) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003033 first_page_ = current;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003034 } else {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003035 previous->set_next_page(current);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003036 }
3037
3038 // Free the chunk.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00003039 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
3040 object, heap()->isolate());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003041 size_ -= static_cast<int>(page->size());
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00003042 objects_size_ -= object->Size();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003043 page_count_--;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003044
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00003045 // Remove entries belonging to this page.
3046 // Use variable alignment to help pass length check (<= 80 characters)
3047 // of single line in tools/presubmit.py.
3048 const intptr_t alignment = MemoryChunk::kAlignment;
3049 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
3050 uintptr_t limit = base + (page->size()-1)/alignment;
3051 for (uintptr_t key = base; key <= limit; key++) {
3052 chunk_map_.Remove(reinterpret_cast<void*>(key),
3053 static_cast<uint32_t>(key));
3054 }
3055
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003056 if (is_pointer_object) {
3057 heap()->QueueMemoryChunkForFree(page);
3058 } else {
3059 heap()->isolate()->memory_allocator()->Free(page);
3060 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003061 }
3062 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003063 heap()->FreeQueuedChunks();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003064}
3065
3066
3067bool LargeObjectSpace::Contains(HeapObject* object) {
3068 Address address = object->address();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003069 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003070
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003071 bool owned = (chunk->owner() == this);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003072
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003073 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
3074
3075 return owned;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003076}
3077
3078
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003079#ifdef VERIFY_HEAP
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003080// We do not assume that the large object iterator works, because it depends
3081// on the invariants we are checking during verification.
3082void LargeObjectSpace::Verify() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003083 for (LargePage* chunk = first_page_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003084 chunk != NULL;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003085 chunk = chunk->next_page()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003086 // Each chunk contains an object that starts at the large object page's
3087 // object area start.
3088 HeapObject* object = chunk->GetObject();
3089 Page* page = Page::FromAddress(object->address());
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003090 CHECK(object->address() == page->area_start());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003091
3092 // The first word should be a map, and we expect all map pointers to be
3093 // in map space.
3094 Map* map = object->map();
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003095 CHECK(map->IsMap());
3096 CHECK(heap()->map_space()->Contains(map));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003097
ager@chromium.orga1645e22009-09-09 19:27:10 +00003098 // We have only code, sequential strings, external strings
3099 // (sequential strings that have been morphed into external
3100 // strings), fixed arrays, and byte arrays in large object space.
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003101 CHECK(object->IsCode() || object->IsSeqString() ||
ager@chromium.orga1645e22009-09-09 19:27:10 +00003102 object->IsExternalString() || object->IsFixedArray() ||
svenpanne@chromium.org6d786c92011-06-15 10:58:27 +00003103 object->IsFixedDoubleArray() || object->IsByteArray());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003104
3105 // The object itself should look OK.
mads.s.ager@gmail.com9a4089a2008-09-01 08:55:01 +00003106 object->Verify();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003107
3108 // Byte arrays and strings don't have interior pointers.
3109 if (object->IsCode()) {
3110 VerifyPointersVisitor code_visitor;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003111 object->IterateBody(map->instance_type(),
3112 object->Size(),
3113 &code_visitor);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003114 } else if (object->IsFixedArray()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003115 FixedArray* array = FixedArray::cast(object);
3116 for (int j = 0; j < array->length(); j++) {
3117 Object* element = array->get(j);
3118 if (element->IsHeapObject()) {
3119 HeapObject* element_object = HeapObject::cast(element);
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003120 CHECK(heap()->Contains(element_object));
3121 CHECK(element_object->map()->IsMap());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003122 }
3123 }
3124 }
3125 }
3126}
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003127#endif
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003128
3129
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003130#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003131void LargeObjectSpace::Print() {
3132 LargeObjectIterator it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003133 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003134 obj->Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003135 }
3136}
3137
3138
3139void LargeObjectSpace::ReportStatistics() {
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00003140 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003141 int num_objects = 0;
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00003142 ClearHistograms(heap()->isolate());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003143 LargeObjectIterator it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003144 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003145 num_objects++;
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003146 CollectHistogramInfo(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003147 }
3148
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00003149 PrintF(" number of objects %d, "
3150 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00003151 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003152}
3153
3154
3155void LargeObjectSpace::CollectCodeStatistics() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00003156 Isolate* isolate = heap()->isolate();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003157 LargeObjectIterator obj_it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003158 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003159 if (obj->IsCode()) {
3160 Code* code = Code::cast(obj);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00003161 isolate->code_kind_statistics()[code->kind()] += code->Size();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003162 }
3163 }
3164}
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003165
3166
3167void Page::Print() {
3168 // Make a best-effort attempt to print the objects in the page.
3169 PrintF("Page@%p in %s\n",
3170 this->address(),
3171 AllocationSpaceName(this->owner()->identity()));
3172 printf(" --------------------------------------\n");
3173 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3174 unsigned mark_size = 0;
3175 for (HeapObject* object = objects.Next();
3176 object != NULL;
3177 object = objects.Next()) {
3178 bool is_marked = Marking::MarkBitFrom(object).Get();
3179 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3180 if (is_marked) {
3181 mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3182 }
3183 object->ShortPrint();
3184 PrintF("\n");
3185 }
3186 printf(" --------------------------------------\n");
3187 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3188}
3189
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003190#endif // DEBUG
3191
3192} } // namespace v8::internal