blob: 15381eaf3224ee51e47211d2c97d5df614eeafb3 [file] [log] [blame]
ager@chromium.orgea91cc52011-05-23 06:06:11 +00001// Copyright 2011 the V8 project authors. All rights reserved.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "macro-assembler.h"
31#include "mark-compact.h"
32#include "platform.h"
33
kasperl@chromium.org71affb52009-05-26 05:44:31 +000034namespace v8 {
35namespace internal {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000036
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000037
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000038// ----------------------------------------------------------------------------
39// HeapObjectIterator
40
41HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +000042 // You can't actually iterate over the anchor page. It is not a real page,
43 // just an anchor for the double linked page list. Initialize as if we have
44 // reached the end of the anchor page, then the first iteration will move on
45 // to the first page.
46 Initialize(space,
47 NULL,
48 NULL,
49 kAllPagesInSpace,
50 NULL);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000051}
52
53
54HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
55 HeapObjectCallback size_func) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +000056 // You can't actually iterate over the anchor page. It is not a real page,
57 // just an anchor for the double linked page list. Initialize the current
58 // address and end as NULL, then the first iteration will move on
59 // to the first page.
60 Initialize(space,
61 NULL,
62 NULL,
63 kAllPagesInSpace,
64 size_func);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000065}
66
67
// Iterates over the objects of a single page. The page must belong to one of
// the precisely swept paged spaces listed in the assert below.
HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  // Only paged spaces are supported; the cast below relies on this.
  ASSERT(owner == page->heap()->old_pointer_space() ||
         owner == page->heap()->old_data_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->cell_space() ||
         owner == page->heap()->property_cell_space() ||
         owner == page->heap()->code_space());
  // Iterate exactly the object area of this one page.
  Initialize(reinterpret_cast<PagedSpace*>(owner),
             page->area_start(),
             page->area_end(),
             kOnePageOnly,
             size_func);
  // Object-by-object iteration is only valid on precisely swept pages.
  ASSERT(page->WasSweptPrecisely());
}
84
85
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +000086void HeapObjectIterator::Initialize(PagedSpace* space,
87 Address cur, Address end,
88 HeapObjectIterator::PageMode mode,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000089 HeapObjectCallback size_f) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +000090 // Check that we actually can iterate this space.
91 ASSERT(!space->was_swept_conservatively());
92
93 space_ = space;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000094 cur_addr_ = cur;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +000095 cur_end_ = end;
96 page_mode_ = mode;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000097 size_func_ = size_f;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000098}
99
100
// We have hit the end of the page and should advance to the next block of
// objects. Returns false when there are no more pages to visit (or when the
// iterator is restricted to a single page).
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    // Fresh iterator (see the space constructors): start from the anchor so
    // that next_page() below yields the first real page.
    cur_page = space_->anchor();
  } else {
    // cur_addr_ points one past the object area; back up one byte so
    // FromAddress resolves to the page we just finished.
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  // Wrapping around to the anchor means every page has been visited.
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  // Object-by-object iteration is only valid on precisely swept pages.
  ASSERT(cur_page->WasSweptPrecisely());
  return true;
}
120
121
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000122// -----------------------------------------------------------------------------
sgjesse@chromium.orgc5145742009-10-07 09:00:33 +0000123// CodeRange
124
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000125
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +0000126CodeRange::CodeRange(Isolate* isolate)
127 : isolate_(isolate),
128 code_range_(NULL),
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000129 free_list_(0),
130 allocation_list_(0),
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +0000131 current_allocation_block_index_(0) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000132}
sgjesse@chromium.orgc5145742009-10-07 09:00:33 +0000133
134
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +0000135bool CodeRange::SetUp(const size_t requested) {
sgjesse@chromium.orgc5145742009-10-07 09:00:33 +0000136 ASSERT(code_range_ == NULL);
137
138 code_range_ = new VirtualMemory(requested);
139 CHECK(code_range_ != NULL);
140 if (!code_range_->IsReserved()) {
141 delete code_range_;
142 code_range_ = NULL;
143 return false;
144 }
145
146 // We are sure that we have mapped a block of requested addresses.
147 ASSERT(code_range_->size() == requested);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000148 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +0000149 Address base = reinterpret_cast<Address>(code_range_->address());
150 Address aligned_base =
151 RoundUp(reinterpret_cast<Address>(code_range_->address()),
152 MemoryChunk::kAlignment);
153 size_t size = code_range_->size() - (aligned_base - base);
154 allocation_list_.Add(FreeBlock(aligned_base, size));
sgjesse@chromium.orgc5145742009-10-07 09:00:33 +0000155 current_allocation_block_index_ = 0;
156 return true;
157}
158
159
160int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
161 const FreeBlock* right) {
162 // The entire point of CodeRange is that the difference between two
163 // addresses in the range can be represented as a signed 32-bit int,
164 // so the cast is semantically correct.
165 return static_cast<int>(left->start - right->start);
166}
167
168
// Selects (via current_allocation_block_index_) an allocation block with at
// least |requested| free bytes. First scans forward through the remaining
// allocation list; if nothing fits, merges the free list back into the
// allocation list (coalescing adjacent blocks) and scans once more. Aborts
// the process if the range is full or too fragmented.
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  // Retry the scan over the freshly compacted allocation list.
  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}
208
209
// Carves |requested_size| bytes (rounded to MemoryChunk alignment) off the
// front of the current allocation block and commits the first |commit_size|
// bytes. On success returns the block start and stores the number of bytes
// actually consumed in |*allocated|; on commit failure returns NULL with
// *allocated == 0.
Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  ASSERT(commit_size <= requested_size);
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough. This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested_size);
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  ASSERT(*allocated <= current.size);
  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
                                               current.start,
                                               commit_size,
                                               *allocated)) {
    *allocated = 0;
    return NULL;
  }
  // Shrink the current block by the amount handed out.
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}
245
246
mstarzinger@chromium.org068ea0a2013-01-30 09:39:44 +0000247bool CodeRange::CommitRawMemory(Address start, size_t length) {
248 return code_range_->Commit(start, length, true);
249}
250
251
252bool CodeRange::UncommitRawMemory(Address start, size_t length) {
253 return code_range_->Uncommit(start, length);
254}
255
256
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +0000257void CodeRange::FreeRawMemory(Address address, size_t length) {
258 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
sgjesse@chromium.orgc5145742009-10-07 09:00:33 +0000259 free_list_.Add(FreeBlock(address, length));
260 code_range_->Uncommit(address, length);
261}
262
263
// Releases the whole code-range reservation and all bookkeeping lists.
// After this the CodeRange is back in its pre-SetUp state.
void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}
270
271
272// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000273// MemoryAllocator
274//
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000275
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +0000276MemoryAllocator::MemoryAllocator(Isolate* isolate)
277 : isolate_(isolate),
278 capacity_(0),
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000279 capacity_executable_(0),
280 size_(0),
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +0000281 size_executable_(0) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000282}
283
284
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +0000285bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000286 capacity_ = RoundUp(capacity, Page::kPageSize);
ager@chromium.org01fe7df2010-11-10 11:59:11 +0000287 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
288 ASSERT_GE(capacity_, capacity_executable_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000289
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000290 size_ = 0;
erik.corry@gmail.com145eff52010-08-23 11:36:18 +0000291 size_executable_ = 0;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +0000292
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000293 return true;
294}
295
296
// Resets the capacity limits. All spaces must already have returned their
// memory, which is what the size_ assert verifies.
void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  ASSERT(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // ASSERT(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}
305
306
// Releases a whole VirtualMemory reservation and updates the allocator's
// size accounting. Must not be used for memory inside the code range, which
// has no VirtualMemory of its own (see the Address overload below).
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(reservation->IsReserved());
  size_t size = reservation->size();
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  ASSERT(!isolate_->code_range()->contains(
      static_cast<Address>(reservation->address())));
  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
  reservation->Release();
}
327
328
// Releases a raw memory region and updates the allocator's size accounting.
// Memory inside the code range is returned to the CodeRange free list;
// everything else is released directly to the OS.
void MemoryAllocator::FreeMemory(Address base,
                                 size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
    // Only executable memory lives inside the code range.
    ASSERT(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
    bool result = VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    ASSERT(result);
  }
}
352
353
// Reserves (but does not commit) |size| bytes with the given alignment.
// On success, ownership of the reservation is transferred to |controller|
// and the aligned base address is returned; on failure returns NULL.
Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                              size_t alignment,
                                              VirtualMemory* controller) {
  VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  // Account for the full reservation, including alignment slack.
  size_ += reservation.size();
  Address base = RoundUp(static_cast<Address>(reservation.address()),
                         alignment);
  controller->TakeControl(&reservation);
  return base;
}
366
367
// Reserves |reserve_size| aligned bytes and commits the first |commit_size|
// of them (with guard pages for executable memory). On success the
// reservation is handed to |controller| and the aligned base returned;
// any failure releases the mapping and returns NULL.
Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
                                               size_t commit_size,
                                               size_t alignment,
                                               Executability executable,
                                               VirtualMemory* controller) {
  ASSERT(commit_size <= reserve_size);
  VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    // Executable memory needs the guard-page layout; see CommitExecutableMemory.
    if (!CommitExecutableMemory(&reservation,
                                base,
                                commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (!reservation.Commit(base, commit_size, false)) {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}
ager@chromium.org01fe7df2010-11-10 11:59:11 +0000401
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +0000402
403void Page::InitializeAsAnchor(PagedSpace* owner) {
404 set_owner(owner);
405 set_prev_page(this);
406 set_next_page(this);
407}
408
409
// Initializes the page starting at |start| as a new-space page belonging to
// |semi_space|, setting up the underlying MemoryChunk and the to/from-space
// flag that matches the semispace's identity.
NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                       Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                               start,
                                               Page::kPageSize,
                                               area_start,
                                               area_end,
                                               NOT_EXECUTABLE,
                                               semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  // New-space pages are always scanned on scavenge.
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  // A page must never be in both semispaces at once.
  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}
435
436
// Turns this page into the sentinel of |semi_space|'s page list. It links to
// itself in both directions and carries no flags.
void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // Flags marks this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);  // Clear every flag bit.
}
445
446
// Initializes the MemoryChunk header placed at |base|: records the chunk and
// object-area bounds, resets all per-chunk bookkeeping (slots buffer, skip
// list, free-list stats, live bytes, mark bitmap) and sets the flags derived
// from |executable| and |owner|.
MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                     Address base,
                                     size_t size,
                                     Address area_start,
                                     Address area_end,
                                     Executability executable,
                                     Space* owner) {
  // The header lives at the very start of the chunk.
  MemoryChunk* chunk = FromAddress(base);

  ASSERT(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  // Nothing allocated yet: the high-water mark sits at the area start.
  chunk->high_water_mark_ = static_cast<int>(area_start - base);
  chunk->parallel_sweeping_ = 0;
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  // A fresh, empty chunk counts as precisely swept.
  chunk->SetFlag(WAS_SWEPT_PRECISELY);

  // These offsets are relied upon elsewhere (e.g. by generated code).
  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (owner == heap->old_data_space()) {
    chunk->SetFlag(CONTAINS_ONLY_DATA);
  }

  return chunk;
}
494
495
// Commit MemoryChunk area to the requested size: grows or shrinks the
// committed object area to hold |requested| bytes, committing/uncommitting
// whole OS pages either through the chunk's own reservation or through the
// isolate's code range. Returns false if the OS operation fails.
bool MemoryChunk::CommitArea(size_t requested) {
  // Executable chunks carry a guard page between header and area.
  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
                      MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less or equal than the reserved size.
    ASSERT(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
        return false;
      }
    } else {
      // No private reservation: the chunk must live in the code range.
      CodeRange* code_range = heap_->isolate()->code_range();
      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    ASSERT(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  // Record the new logical area size.
  area_end_ = area_start_ + requested;
  return true;
}
541
542
// Links this chunk into the page list immediately after |other|. The update
// order is chosen so that concurrent sweeper threads never observe a broken
// list.
void MemoryChunk::InsertAfter(MemoryChunk* other) {
  next_chunk_ = other->next_chunk_;
  prev_chunk_ = other;

  // This memory barrier is needed since concurrent sweeper threads may iterate
  // over the list of pages while a new page is inserted.
  // TODO(hpayer): find a cleaner way to guarantee that the page list can be
  // expanded concurrently
  MemoryBarrier();

  // The following two write operations can take effect in arbitrary order
  // since pages are always iterated by the sweeper threads in LIFO order, i.e,
  // the inserted page becomes visible for the sweeper threads after
  // other->next_chunk_ = this;
  other->next_chunk_->prev_chunk_ = this;
  other->next_chunk_ = this;
}
560
561
// Removes this chunk from its doubly linked page list and clears its links.
// Also retires the chunk's scan-on-scavenge registration if it had one.
void MemoryChunk::Unlink() {
  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
    // Keep the heap's scan-on-scavenge page count in sync with the flag.
    heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  next_chunk_->prev_chunk_ = prev_chunk_;
  prev_chunk_->next_chunk_ = next_chunk_;
  prev_chunk_ = NULL;
  next_chunk_ = NULL;
}
572
573
// Allocates a MemoryChunk whose object area can grow to |reserve_area_size|
// bytes, with the first |commit_area_size| bytes committed up front.
// Executable chunks come either from the isolate's code range (if one
// exists) or from an aligned OS allocation with guard pages; non-executable
// chunks always come from an aligned OS allocation. Returns NULL on failure.
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  ASSERT(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         OS::CommitPageSize()) + CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_,
          StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range()->exists()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
                                                       commit_size,
                                                       &chunk_size);
      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                       MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size,
                                   commit_size,
                                   MemoryChunk::kAlignment,
                                   executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

    if (Heap::ShouldZapGarbage()) {
      // Zap the header and the committed object area (not the guard page).
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         OS::CommitPageSize());
    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
                                 commit_area_size, OS::CommitPageSize());
    base = AllocateAlignedMemory(chunk_size,
                                 commit_size,
                                 MemoryChunk::kAlignment,
                                 executable,
                                 &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                base,
                                                chunk_size,
                                                area_start,
                                                area_end,
                                                executable,
                                                owner);
  // Chunks from the code range have no reservation; this is a no-op then.
  result->set_reserved_memory(&reservation);
  return result;
}
704
705
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +0000706void Page::ResetFreeListStatistics() {
707 non_available_small_blocks_ = 0;
708 available_in_small_free_list_ = 0;
709 available_in_medium_free_list_ = 0;
710 available_in_large_free_list_ = 0;
711 available_in_huge_free_list_ = 0;
712}
713
714
fschneider@chromium.org7d10be52012-04-10 12:30:14 +0000715Page* MemoryAllocator::AllocatePage(intptr_t size,
716 PagedSpace* owner,
erik.corry@gmail.com145eff52010-08-23 11:36:18 +0000717 Executability executable) {
mstarzinger@chromium.org068ea0a2013-01-30 09:39:44 +0000718 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +0000719
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +0000720 if (chunk == NULL) return NULL;
721
722 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
723}
724
725
726LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
fschneider@chromium.org7d10be52012-04-10 12:30:14 +0000727 Space* owner,
728 Executability executable) {
mstarzinger@chromium.org068ea0a2013-01-30 09:39:44 +0000729 MemoryChunk* chunk = AllocateChunk(object_size,
730 object_size,
731 executable,
732 owner);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +0000733 if (chunk == NULL) return NULL;
734 return LargePage::Initialize(isolate_->heap(), chunk);
735}
736
737
// Releases |chunk| and all of its auxiliary structures back to the OS.
// Embedder callbacks are notified first, then the heap is told to treat
// stale pointers into this region as unmapped, and finally the backing
// memory itself is freed.
void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  // Notify registered embedder callbacks; chunks without an owner are
  // not reported.
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  // Record the unmapped range so later accesses that still reference this
  // address can be recognized (see Heap::RememberUnmappedPage).
  isolate_->heap()->RememberUnmappedPage(
      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  // Chunks created from a VirtualMemory reservation are released through
  // that reservation; otherwise free the raw address range directly.
  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(),
               chunk->size(),
               chunk->executable());
  }
}
761
762
// Commits a previously reserved block of memory and bumps the
// memory_allocated counter. When garbage zapping is enabled the fresh
// memory is filled with the zap value so reads of uninitialized memory
// are detectable. Returns false if the OS-level commit fails.
bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}
775
776
777bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
778 if (!VirtualMemory::UncommitRegion(start, size)) return false;
779 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
780 return true;
781}
782
783
784void MemoryAllocator::ZapBlock(Address start, size_t size) {
785 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
786 Memory::Address_at(start + s) = kZapValue;
787 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000788}
789
790
// Invokes every registered memory-allocation callback whose space and
// action filters match this event.
void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    // The registration fields are bit masks; a callback fires only when
    // its mask covers both the space and the action of this event.
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}
802
803
804bool MemoryAllocator::MemoryAllocationCallbackRegistered(
805 MemoryAllocationCallback callback) {
806 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
807 if (memory_allocation_callbacks_[i].callback == callback) return true;
808 }
809 return false;
810}
811
812
813void MemoryAllocator::AddMemoryAllocationCallback(
814 MemoryAllocationCallback callback,
815 ObjectSpace space,
816 AllocationAction action) {
817 ASSERT(callback != NULL);
818 MemoryAllocationCallbackRegistration registration(callback, space, action);
819 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
820 return memory_allocation_callbacks_.Add(registration);
821}
822
823
824void MemoryAllocator::RemoveMemoryAllocationCallback(
825 MemoryAllocationCallback callback) {
826 ASSERT(callback != NULL);
827 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
828 if (memory_allocation_callbacks_[i].callback == callback) {
829 memory_allocation_callbacks_.Remove(i);
830 return;
831 }
832 }
833 UNREACHABLE();
834}
835
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000836
#ifdef DEBUG
// Prints capacity, used bytes, and the available fraction (as a
// percentage) of this allocator.
void MemoryAllocator::ReportStatistics() {
  // Fraction of the capacity that is still available.
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  // "%%%d" prints a literal '%' followed by the integer percentage.
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif
846
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +0000847
// Returns the offset of the guard page that follows a code page's
// header: the first OS page boundary at or after the object start offset.
int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
}
853
854
// A code-page guard region is exactly one OS commit page.
int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(OS::CommitPageSize());
}
858
859
// First usable byte of a code page: the header plus its guard page.
int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}
865
866
// One-past-the-end of a code page's usable area; the final OS page of
// the code page is reserved for the trailing guard page.
int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
}
872
873
// Commits the in-use portion of an executable chunk with the layout:
//
//   [ header | guard page | executable area ... | trailing guard page ]
//
// The header is committed non-executable, the body executable, and the
// two guard pages are left inaccessible. |commit_size| is the amount to
// commit now; |reserved_size| is the full reservation, which fixes the
// position of the trailing guard page. Returns false as soon as any
// commit/guard operation fails.
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
                                             Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  if (!vm->Commit(start,
                  CodePageGuardStartOffset(),
                  false)) {
    return false;
  }

  // Create guard page after the header.
  if (!vm->Guard(start + CodePageGuardStartOffset())) {
    return false;
  }

  // Commit page body (executable).
  if (!vm->Commit(start + CodePageAreaStartOffset(),
                  commit_size - CodePageGuardStartOffset(),
                  true)) {
    return false;
  }

  // Create guard page before the end.
  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
    return false;
  }

  return true;
}
904
905
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000906// -----------------------------------------------------------------------------
ulan@chromium.org2efb9002012-01-19 15:36:35 +0000907// MemoryChunk implementation
908
// Adjusts live-byte accounting for an object at |address| that the
// mutator changed in place. For not-yet-swept pages of paged spaces the
// unswept-free-bytes counter is corrected in the opposite direction so
// the two counters stay consistent.
void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}
916
917// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000918// PagedSpace implementation
919
// Constructs a paged space. No memory is allocated here; pages are added
// later via Expand().
PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable),
      free_list_(this),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {
  // Code pages lose some usable area to the header and guard pages, so
  // their area size differs from that of regular pages.
  if (id == CODE_SPACE) {
    area_size_ = heap->isolate()->memory_allocator()->
        CodePageAreaSize();
  } else {
    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
  }
  // Express the maximum capacity in whole pages' worth of usable area.
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
      * AreaSize();
  accounting_stats_.Clear();

  // No linear allocation area until the first page is added.
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  // The anchor is a sentinel making the page list circular.
  anchor_.InitializeAsAnchor(this);
}
944
945
// Nothing to do: pages are allocated on demand when the space expands.
bool PagedSpace::SetUp() {
  return true;
}
949
950
// Paged spaces require no explicit setup (see SetUp), so they always
// report as set up.
bool PagedSpace::HasBeenSetUp() {
  return true;
}
954
955
956void PagedSpace::TearDown() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +0000957 PageIterator iterator(this);
958 while (iterator.has_next()) {
959 heap()->isolate()->memory_allocator()->Free(iterator.next());
960 }
961 anchor_.set_next_page(&anchor_);
962 anchor_.set_prev_page(&anchor_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000963 accounting_stats_.Clear();
964}
965
966
danno@chromium.org72204d52012-10-31 10:02:10 +0000967size_t PagedSpace::CommittedPhysicalMemory() {
968 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
969 MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
970 size_t size = 0;
971 PageIterator it(this);
972 while (it.has_next()) {
973 size += it.next()->CommittedPhysicalMemory();
974 }
975 return size;
976}
977
978
// Finds the heap object covering |addr| by linearly scanning the page
// that contains it. Returns a failure if |addr| is not in this space.
MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on precisely swept spaces.
  ASSERT(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  // Since Contains(addr) held, some object on the page must cover addr.
  UNREACHABLE();
  return Failure::Exception();
}
996
// Returns whether this space may grow by at least one more page without
// exceeding its maximum capacity.
bool PagedSpace::CanExpand() {
  ASSERT(max_capacity_ % AreaSize() == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);

  // Are we going to exceed capacity for this space?
  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;

  return true;
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001009
ulan@chromium.org6e196bf2013-03-13 09:38:22 +00001010
// Grows the space by one page and links it at the end of the page list.
// Returns false if the space is already at capacity or the page
// allocation fails.
bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  intptr_t size = AreaSize();

  // An empty page list means this is the very first page; use the
  // (possibly smaller) initial page size.
  if (anchor_.next_page() == &anchor_) {
    size = SizeOfFirstPage();
  }

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
      size, this, executable());
  if (p == NULL) return false;

  ASSERT(Capacity() <= max_capacity_);

  // Insert at the tail of the circular page list (before the anchor).
  p->InsertAfter(anchor_.prev_page());

  return true;
}
1030
1031
// Returns the (possibly reduced) size to use for the first page of this
// space, capped at the regular page area size.
intptr_t PagedSpace::SizeOfFirstPage() {
  int size = 0;
  switch (identity()) {
    case OLD_POINTER_SPACE:
      size = 64 * kPointerSize * KB;
      break;
    case OLD_DATA_SPACE:
      size = 192 * KB;
      break;
    case MAP_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case CELL_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case PROPERTY_CELL_SPACE:
      size = 8 * kPointerSize * KB;
      break;
    case CODE_SPACE:
      if (heap()->isolate()->code_range()->exists()) {
        // When code range exists, code pages are allocated in a special way
        // (from the reserved code range). That part of the code is not yet
        // upgraded to handle small pages.
        size = AreaSize();
      } else {
        size = 384 * KB;
      }
      break;
    default:
      UNREACHABLE();
  }
  return Min(size, AreaSize());
}
1065
1066
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001067int PagedSpace::CountTotalPages() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001068 PageIterator it(this);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001069 int count = 0;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001070 while (it.has_next()) {
1071 it.next();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001072 count++;
1073 }
1074 return count;
1075}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001076
1077
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00001078void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1079 sizes->huge_size_ = page->available_in_huge_free_list();
1080 sizes->small_size_ = page->available_in_small_free_list();
1081 sizes->medium_size_ = page->available_in_medium_free_list();
1082 sizes->large_size_ = page->available_in_large_free_list();
1083}
1084
1085
1086void PagedSpace::ResetFreeListStatistics() {
1087 PageIterator page_iterator(this);
1088 while (page_iterator.has_next()) {
1089 Page* page = page_iterator.next();
1090 page->ResetFreeListStatistics();
1091 }
1092}
1093
1094
// Removes |page| from this space and returns its memory. The page must
// contain no live objects. If |unlink| is true the page is also removed
// from the space's page list.
void PagedSpace::ReleasePage(Page* page, bool unlink) {
  ASSERT(page->LiveBytes() == 0);
  ASSERT(AreaSize() == page->area_size());

  // Adjust list of unswept pages if the page is the head of the list.
  if (first_unswept_page_ == page) {
    first_unswept_page_ = page->next_page();
    if (first_unswept_page_ == anchor()) {
      first_unswept_page_ = Page::FromAddress(NULL);
    }
  }

  if (page->WasSwept()) {
    // An empty swept page consists entirely of free-list memory; evict
    // those entries and account them as allocated so ShrinkSpace below
    // balances the books.
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    ASSERT_EQ(AreaSize(), static_cast<int>(size));
  } else {
    DecreaseUnsweptFreeBytes(page);
  }

  // Drop the linear allocation area if it points into the released page.
  if (Page::FromAllocationTop(allocation_info_.top) == page) {
    allocation_info_.top = allocation_info_.limit = NULL;
  }

  if (unlink) {
    page->Unlink();
  }
  // Data-only pages can be freed immediately; other pages are queued for
  // deferred freeing (see Heap::QueueMemoryChunkForFree).
  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
    heap()->isolate()->memory_allocator()->Free(page);
  } else {
    heap()->QueueMemoryChunkForFree(page);
  }

  ASSERT(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}
1131
1132
#ifdef DEBUG
// Debug printing is intentionally a no-op for paged spaces.
void PagedSpace::Print() { }
#endif
1136
#ifdef VERIFY_HEAP
// Verifies the integrity of this space: every page belongs to it and was
// swept precisely; objects are well formed, laid out back to back, have
// maps residing in map space, and their marked (black) bytes do not
// exceed the page's recorded live bytes. Also checks the allocation top
// points into the space. Conservatively swept spaces cannot be iterated
// and are skipped.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // We can only iterate over the pages if they were swept precisely.
  if (was_swept_conservatively_) return;

  // An empty linear allocation area (top == limit) trivially satisfies
  // the allocation-pointer check.
  bool allocation_pointer_found_in_space =
      (allocation_info_.top == allocation_info_.limit);
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top)) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001186
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001187// -----------------------------------------------------------------------------
1188// NewSpace implementation
1189
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001190
// Reserves one aligned chunk covering both semispaces, sets both
// semispaces up inside it, commits to-space, and initializes the masks
// used for fast containment tests. Returns false if the reservation or
// the initial commit fails.
bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base =
      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
          size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  ASSERT(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  // The two semispaces occupy the two halves of the reserved chunk.
  to_space_.SetUp(chunk_base_,
                  initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  ASSERT(!from_space_.is_committed());  // No need to use memory yet.

  // Masks exploiting the aligned power-of-two reservation for fast
  // address containment and heap-object-tag checks.
  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();

  return true;
}
1246
1247
// Releases the histogram arrays, tears down both semispaces, and frees
// the single reserved chunk that backed them.
void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));

  ASSERT(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
  chunk_base_ = NULL;
  chunk_size_ = 0;
}
1273
1274
// Exchanges the roles of the two semispaces.
void NewSpace::Flip() {
  SemiSpace::Swap(&from_space_, &to_space_);
}
1278
1279
// Doubles the new-space capacity, up to the maximum. The two semispaces
// must stay the same size: if from-space cannot follow, to-space is
// shrunk back; failing even that is fatal.
void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  ASSERT(Capacity() < MaximumCapacity());
  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
1298
1299
// Shrinks both semispaces to twice the currently used size (never below
// the initial capacity), rounded up to whole pages. As in Grow(), the
// semispaces are kept the same size, with a fatal error if that cannot
// be restored.
void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.page_high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
1320
1321
// Points the linear allocation area at the current to-space page. While
// incremental marking is active the limit may be lowered so allocation
// traps early, giving the marker a chance to do a step.
void NewSpace::UpdateAllocationInfo() {
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
  allocation_info_.top = to_space_.page_low();
  allocation_info_.limit = to_space_.page_high();

  // Lower limit during incremental marking.
  if (heap()->incremental_marking()->IsMarking() &&
      inline_allocation_limit_step() != 0) {
    Address new_limit =
        allocation_info_.top + inline_allocation_limit_step();
    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
1336
1337
// Rewinds to-space and the allocation area to the start of the space and
// clears all to-space mark bits.
void NewSpace::ResetAllocationInfo() {
  to_space_.Reset();
  UpdateAllocationInfo();
  pages_used_ = 0;
  // Clear all mark-bits in the to-space.
  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}
1348
1349
// Advances allocation to the next to-space page, filling the unused tail
// of the current page with a filler object. Returns false if the current
// page is already empty or to-space has no further page.
bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top;
  if (NewSpacePage::IsAtStart(top)) {
    // The current page is already empty. Don't try to make another.

    // We should only get here if someone asks to allocate more
    // than what can be stored in a single page.
    // TODO(gc): Change the limit on new-space allocation to prevent this
    // from happening (all such allocations should go directly to LOSpace).
    return false;
  }
  if (!to_space_.AdvancePage()) {
    // Failed to get a new page in to-space.
    return false;
  }

  // Clear remainder of current page.
  Address limit = NewSpacePage::FromLimit(top)->area_end();
  // During a scavenge the promotion queue shares pages with allocation
  // and must be told about the new boundary.
  if (heap()->gc_state() == Heap::SCAVENGE) {
    heap()->promotion_queue()->SetNewLimit(limit);
    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
  }

  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page);
  pages_used_++;
  UpdateAllocationInfo();

  return true;
}
1380
1381
// Slow path of AllocateRaw, taken when the requested allocation would cross
// allocation_info_.limit. Either the limit was artificially lowered to give
// incremental marking a chance to do a step (raise it and retry), or the
// current page is full (advance to a fresh page and retry), or the space is
// exhausted (fail so the caller can trigger a GC).
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top;
  Address new_top = old_top + size_in_bytes;
  Address high = to_space_.page_high();
  if (allocation_info_.limit < high) {
    // Incremental marking has lowered the limit to get a
    // chance to do a step.
    allocation_info_.limit = Min(
        allocation_info_.limit + inline_allocation_limit_step_,
        high);
    // Account everything allocated since the previous step, then retry the
    // fast path with the raised limit.
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);
  } else if (AddFreshPage()) {
    // Switched to new page. Try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
  } else {
    // Out of new-space pages; caller must retry after a GC.
    return Failure::RetryAfterGC();
  }
}
1408
1409
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
//
// Walks to-space from its start up to the allocation top, checking that it
// is densely packed with well-formed objects, then verifies both semispaces.
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!NewSpacePage::IsAtEnd(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsCode());

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
      // Next page should be valid.
      CHECK(!page->is_anchor());
      current = page->area_start();
    }
  }

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif
1465
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001466// -----------------------------------------------------------------------------
1467// SemiSpace implementation
1468
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00001469void SemiSpace::SetUp(Address start,
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001470 int initial_capacity,
1471 int maximum_capacity) {
1472 // Creates a space in the young generation. The constructor does not
1473 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1474 // memory of size 'capacity' when set up, and does not grow or shrink
1475 // otherwise. In the mark-compact collector, the memory region of the from
1476 // space is used as the marking stack. It requires contiguous memory
1477 // addresses.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001478 ASSERT(maximum_capacity >= Page::kPageSize);
1479 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001480 capacity_ = initial_capacity;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001481 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001482 committed_ = false;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001483 start_ = start;
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001484 address_mask_ = ~(maximum_capacity - 1);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001485 object_mask_ = address_mask_ | kHeapObjectTagMask;
ager@chromium.org9085a012009-05-11 19:22:57 +00001486 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001487 age_mark_ = start_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001488}
1489
1490
1491void SemiSpace::TearDown() {
1492 start_ = NULL;
1493 capacity_ = 0;
1494}
1495
1496
// Commit capacity_ bytes of the reserved region from the OS and link the
// page list into the anchor. Pages are created from the end of the region
// backwards, so the semispace occupies the high end of its reservation and
// grows downwards in address space. Returns false if the OS commit fails.
bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  int pages = capacity_ / Page::kPageSize;
  Address end = start_ + maximum_capacity_;
  Address start = end - pages * Page::kPageSize;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
                                                          capacity_,
                                                          executable())) {
    return false;
  }

  // Build the circular page list hanging off the anchor. Page i is the
  // i-th page counted from the end of the reservation.
  NewSpacePage* page = anchor();
  for (int i = 1; i <= pages; i++) {
    NewSpacePage* new_page =
      NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
    new_page->InsertAfter(page);
    page = new_page;
  }

  committed_ = true;
  Reset();
  return true;
}
1520
1521
// Return the committed block to the OS and detach every page, leaving the
// anchor self-linked (an empty circular list). Returns false if the OS
// uncommit fails, in which case nothing is changed.
bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  // The committed block occupies the last capacity_ bytes of the reservation.
  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
    return false;
  }
  anchor()->set_next_page(anchor());
  anchor()->set_prev_page(anchor());

  committed_ = false;
  return true;
}
1534
1535
danno@chromium.org72204d52012-10-31 10:02:10 +00001536size_t SemiSpace::CommittedPhysicalMemory() {
1537 if (!is_committed()) return 0;
1538 size_t size = 0;
1539 NewSpacePageIterator it(this);
1540 while (it.has_next()) {
1541 size += it.next()->CommittedPhysicalMemory();
1542 }
1543 return size;
1544}
1545
1546
// Grow the semispace to new_capacity bytes (page-aligned, larger than the
// current capacity, at most maximum_capacity_). Commits the additional
// memory below the currently committed block and appends the new pages to
// the page list, copying the flip-relevant flags from the last old page.
// Returns false if the OS commit fails.
bool SemiSpace::GrowTo(int new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  int pages_before = capacity_ / Page::kPageSize;
  int pages_after = new_capacity / Page::kPageSize;

  // The space occupies the high end of its reservation; growing moves the
  // start of the committed block downwards.
  Address end = start_ + maximum_capacity_;
  Address start = end - new_capacity;
  size_t delta = new_capacity - capacity_;

  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      start, delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  NewSpacePage* last_page = anchor()->prev_page();
  ASSERT(last_page != anchor());
  for (int i = pages_before + 1; i <= pages_after; i++) {
    Address page_address = end - i * Page::kPageSize;
    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
                                                      page_address,
                                                      this);
    new_page->InsertAfter(last_page);
    Bitmap::Clear(new_page);
    // Duplicate the flags that was set on the old page.
    new_page->SetFlags(last_page->GetFlags(),
                       NewSpacePage::kCopyOnFlipFlagsMask);
    last_page = new_page;
  }
  return true;
}
1583
1584
// Shrink the semispace to new_capacity bytes (page-aligned, at least
// initial_capacity_, smaller than the current capacity). If committed,
// uncommits the now-unused low end of the block and truncates the page
// list. Returns false if the OS uncommit fails.
bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  if (is_committed()) {
    // Semispaces grow backwards from the end of their allocated capacity,
    // so we find the before and after start addresses relative to the
    // end of the space.
    Address space_end = start_ + maximum_capacity_;
    Address old_start = space_end - capacity_;
    size_t delta = capacity_ - new_capacity;
    ASSERT(IsAligned(delta, OS::AllocateAlignment()));

    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
    if (!allocator->UncommitBlock(old_start, delta)) {
      return false;
    }

    // Truncate the circular page list: the page pages_after pages from the
    // end becomes the new last page.
    int pages_after = new_capacity / Page::kPageSize;
    NewSpacePage* new_last_page =
        NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
    new_last_page->set_next_page(anchor());
    anchor()->set_prev_page(new_last_page);
    // The current allocation page must survive the shrink.
    ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
  }

  capacity_ = new_capacity;

  return true;
}
1615
1616
// After Swap() has exchanged the two SemiSpace objects by value, repair this
// space's page list (owner and anchor back-pointers) and flip each page's
// to-/from-space identity. |flags|/|mask| let the new to-space inherit GC
// flags from the previously active space.
void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
  anchor_.set_owner(this);
  // Fixup back-pointers to anchor. Address of anchor changes
  // when we swap.
  anchor_.prev_page()->set_next_page(&anchor_);
  anchor_.next_page()->set_prev_page(&anchor_);

  // id_ still holds the pre-flip identity at this point.
  bool becomes_to_space = (id_ == kFromSpace);
  id_ = becomes_to_space ? kToSpace : kFromSpace;
  NewSpacePage* page = anchor_.next_page();
  while (page != &anchor_) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (becomes_to_space) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      page->ResetLiveBytes();
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    page = page->next_page();
  }
}
1645
1646
1647void SemiSpace::Reset() {
1648 ASSERT(anchor_.next_page() != &anchor_);
1649 current_page_ = anchor_.next_page();
1650}
1651
1652
// Exchange the roles of the two semispaces by swapping the SemiSpace objects
// themselves (a shallow member-wise copy), then patching up each space's
// page-list anchor pointers and per-page to-/from-space flags via FlipPages.
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  ASSERT(from->anchor_.next_page() != &from->anchor_);
  ASSERT(to->anchor_.next_page() != &to->anchor_);

  // Swap bits.
  SemiSpace tmp = *from;
  *from = *to;
  *to = tmp;

  // Fixup back-pointers to the page list anchor now that its address
  // has changed.
  // Swap to/from-space bits on pages.
  // Copy GC flags from old active space (from-space) to new (to-space).
  intptr_t flags = from->current_page()->GetFlags();
  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);

  from->FlipPages(0, 0);
}
1672
1673
1674void SemiSpace::set_age_mark(Address mark) {
1675 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1676 age_mark_ = mark;
1677 // Mark all pages up to the one containing mark.
1678 NewSpacePageIterator it(space_start(), mark);
1679 while (it.has_next()) {
1680 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1681 }
1682}
1683
1684
#ifdef DEBUG
// Intentionally empty: SemiSpace has nothing useful to print, but the
// Space interface requires a Print() in debug builds.
void SemiSpace::Print() { }
#endif
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001688
#ifdef VERIFY_HEAP
// Check the structural invariants of the semispace's page list: ownership,
// consistent to-/from-space flags on every page, and intact prev/next links.
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  NewSpacePage* page = anchor_.next_page();
  CHECK(anchor_.semi_space() == this);
  while (page != &anchor_) {
    CHECK(page->semi_space() == this);
    CHECK(page->InNewSpace());
    // Exactly the flag matching this space's identity must be set.
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(!page->IsFlagSet(
            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
      // TODO(gc): Check that the live_bytes_count_ field matches the
      // black marking on the page (if we make it match in new-space).
    }
    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    CHECK(page->prev_page()->next_page() == page);
    page = page->next_page();
  }
}
#endif
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001720
#ifdef DEBUG
// Check that [start, end] is a valid iteration range: both addresses lie in
// the same semispace, and end's page is reachable from start's page by
// following next-page links (i.e. start does not come after end).
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to same semi-space
  NewSpacePage* page = NewSpacePage::FromLimit(start);
  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
  SemiSpace* space = page->semi_space();
  CHECK_EQ(space, end_page->semi_space());
  // Start address is before end address, either on same page,
  // or end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    CHECK(start <= end);
  } else {
    // Walking forward from start's page must reach end's page before
    // wrapping around to the anchor.
    while (page != end_page) {
      page = page->next_page();
      CHECK_NE(page, space->anchor());
    }
  }
}
#endif
1741
1742
1743// -----------------------------------------------------------------------------
1744// SemiSpaceIterator implementation.
// Iterate all objects in the new space, from the bottom up to the current
// allocation top.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}
1748
1749
// Same as above, but with a callback that supplies each object's size
// (used when object sizes cannot be read from the objects themselves).
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}
1754
1755
// Iterate from a given start address up to the current allocation top.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}
1759
1760
// Iterate an explicit address range; both ends must lie in the same
// semispace (checked by Initialize via AssertValidRange).
SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
  Initialize(from, to, NULL);
}
1764
1765
// Shared constructor body: validate the range and record the iteration
// bounds and the optional size callback.
void SemiSpaceIterator::Initialize(Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}
1774
1775
1776#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
// Resets the isolate-wide per-instance-type histograms (names, counters)
// and the JS object spill statistics.
static void ClearHistograms() {
  Isolate* isolate = Isolate::Current();
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();
}
1791
1792
// Zero the per-Code::Kind byte counters in the given statistics array
// (one slot per code kind).
static void ClearCodeKindStatistics(int* code_kind_statistics) {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}
1798
1799
// Print the accumulated byte counts per code kind, skipping kinds with no
// recorded bytes.
static void ReportCodeKindStatistics(int* code_kind_statistics) {
  PrintF("\n   Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF("     %-20s: %10d bytes\n",
             Code::Kind2String(static_cast<Code::Kind>(i)),
             code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}
1811
1812
// Record one heap object in the isolate-wide per-instance-type histograms
// (count and bytes), optionally gathering JS object spill statistics.
// Returns the object's size so callers can advance their iteration.
static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = obj->GetIsolate();
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(isolate->heap_histograms()[type].name() != NULL);
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(
        isolate->js_spill_information());
  }

  return obj->Size();
}
1828
1829
// Print the shared heap histograms: one line per instance type with a
// non-zero count, a summary line lumping all string types together, and
// (optionally) the JS object spill statistics.
static void ReportHistogram(bool print_spill) {
  Isolate* isolate = Isolate::Current();
  PrintF("\n  Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF("    %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)      \
    string_number += isolate->heap_histograms()[type].number(); \
    string_bytes += isolate->heap_histograms()[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}
1860#endif // DEBUG
1861
1862
1863// Support for statistics gathering for --heap-stats and --log-gc.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001864void NewSpace::ClearHistograms() {
1865 for (int i = 0; i <= LAST_TYPE; i++) {
1866 allocated_histogram_[i].clear();
1867 promoted_histogram_[i].clear();
1868 }
1869}
1870
1871// Because the copying collector does not touch garbage objects, we iterate
1872// the new space before a collection to get a histogram of allocated objects.
whesse@chromium.org030d38e2011-07-13 13:23:34 +00001873// This only happens when --log-gc flag is set.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001874void NewSpace::CollectStatistics() {
1875 ClearHistograms();
1876 SemiSpaceIterator it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001877 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001878 RecordAllocation(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001879}
1880
1881
// Emit one heap-sample log record series for the given histogram array:
// a begin event, one item per non-empty type (all string types lumped into
// a single "STRING_TYPE" item), and an end event.
static void DoReportStatistics(Isolate* isolate,
                               HistogramInfo* info, const char* description) {
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)       \
  string_number += info[type].number();               \
  string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(isolate,
          HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001908
1909
// Report new-space statistics: in debug builds with --heap-stats, print
// capacity/availability and the allocation histogram; with --log-gc, emit
// both the allocated and promoted histograms to the GC log.
void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF("  capacity: %" V8_PTR_PREFIX "d"
               ", available: %" V8_PTR_PREFIX "d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n  Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF("    %-34s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

  if (FLAG_log_gc) {
    Isolate* isolate = ISOLATE;
    DoReportStatistics(isolate, allocated_histogram_, "allocated");
    DoReportStatistics(isolate, promoted_histogram_, "promoted");
  }
}
1936
1937
1938void NewSpace::RecordAllocation(HeapObject* obj) {
1939 InstanceType type = obj->map()->instance_type();
1940 ASSERT(0 <= type && type <= LAST_TYPE);
1941 allocated_histogram_[type].increment_number(1);
1942 allocated_histogram_[type].increment_bytes(obj->Size());
1943}
1944
1945
1946void NewSpace::RecordPromotion(HeapObject* obj) {
1947 InstanceType type = obj->map()->instance_type();
1948 ASSERT(0 <= type && type <= LAST_TYPE);
1949 promoted_histogram_[type].increment_number(1);
1950 promoted_histogram_[type].increment_bytes(obj->Size());
1951}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001952
danno@chromium.org72204d52012-10-31 10:02:10 +00001953
// Physical memory actually backing the new space. Without lazy commits the
// answer is simply the committed size; otherwise sum per-page high-water
// marks of both semispaces (after refreshing to-space's mark from the
// current allocation top).
size_t NewSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}
1963
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001964// -----------------------------------------------------------------------------
1965// Free lists for old object spaces implementation
1966
// Turn this block of free memory into a heap-iterable object of exactly
// size_in_bytes: a FreeSpace object when large enough to also carry a next
// pointer, otherwise a one- or two-word filler.
void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block. If the block
  // is big enough to be a FreeSpace with at least one extra word (the next
  // pointer), we set its map to be the free space map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (eg, one or two words), to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > FreeSpace::kHeaderSize) {
    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
    // Can't use FreeSpace::cast because it fails during deserialization.
    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
    this_as_free_space->set_size(size_in_bytes);
  } else if (size_in_bytes == kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
  // deserialization because the free space map is not done yet.
}
1993
1994
// Read the next-node pointer of this free-list node. FreeSpace nodes store
// it at kNextOffset; the small filler variants store it right after the map
// word (at kPointerSize).
FreeListNode* FreeListNode::next() {
  ASSERT(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    // map() may still be NULL during bootstrapping, before the free space
    // map has been created (see the comment in set_next).
    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kNextOffset));
  } else {
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kPointerSize));
  }
}
2006
2007
// Address of the slot holding this node's next pointer; layout mirrors
// next() above (kNextOffset for FreeSpace nodes, kPointerSize for fillers).
FreeListNode** FreeListNode::next_address() {
  ASSERT(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
  } else {
    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
  }
}
2017
2018
// Store |next| in this node's next-pointer slot; layout mirrors next()
// above (kNextOffset for FreeSpace nodes, kPointerSize for fillers).
void FreeListNode::set_next(FreeListNode* next) {
  ASSERT(IsFreeListNode(this));
  // While we are booting the VM the free space map will actually be null. So
  // we have to make sure that we don't try to use it for anything at that
  // stage.
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
    Memory::Address_at(address() + kNextOffset) =
        reinterpret_cast<Address>(next);
  } else {
    Memory::Address_at(address() + kPointerSize) =
        reinterpret_cast<Address>(next);
  }
}
2033
2034
// Splices all nodes of |category| onto the front of this category's list and
// returns the number of bytes moved.  Both categories' mutexes are held while
// the lists are relinked; the source category is left empty.
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
  intptr_t free_bytes = 0;
  if (category->top_ != NULL) {
    ASSERT(category->end_ != NULL);
    // This is safe (not going to deadlock) since Concatenate operations
    // are never performed on the same free lists at the same time in
    // reverse order.
    ScopedLock lock_target(mutex_);
    ScopedLock lock_source(category->mutex());
    free_bytes = category->available();
    if (end_ == NULL) {
      // This category was empty: the source list becomes the entire list.
      end_ = category->end();
    } else {
      // Link our current nodes behind the source list's tail.
      category->end()->set_next(top_);
    }
    top_ = category->top();
    available_ += category->available();
    category->Reset();  // Source must no longer track the moved nodes.
  }
  return free_bytes;
}
2056
2057
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002058void FreeListCategory::Reset() {
2059 top_ = NULL;
2060 end_ = NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002061 available_ = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002062}
2063
2064
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002065intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2066 int sum = 0;
2067 FreeListNode** n = &top_;
2068 while (*n != NULL) {
2069 if (Page::FromAddress((*n)->address()) == p) {
2070 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2071 sum += free_space->Size();
2072 *n = (*n)->next();
2073 } else {
2074 n = (*n)->next_address();
2075 }
2076 }
2077 if (top_ == NULL) {
2078 end_ = NULL;
2079 }
2080 available_ -= sum;
2081 return sum;
2082}
2083
2084
// Pops the first usable node off this category's list and reports its size
// through |node_size|.  Nodes that lie on evacuation candidate pages are
// skipped and dropped from the accounting (the list head up to the returned
// node is unlinked).  Returns NULL if no usable node exists.
FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
  FreeListNode* node = top_;

  if (node == NULL) return NULL;

  // Skip (and uncount) nodes on pages that are about to be evacuated.
  while (node != NULL &&
         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
    node = node->next();
  }

  if (node != NULL) {
    // Detach the node and everything before it from the list.
    set_top(node->next());
    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
    available_ -= *node_size;
  } else {
    // Every node was on an evacuation candidate; the list is now empty.
    set_top(NULL);
  }

  if (top() == NULL) {
    set_end(NULL);
  }

  return node;
}
2110
2111
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002112FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
2113 int *node_size) {
2114 FreeListNode* node = PickNodeFromList(node_size);
2115 if (node != NULL && *node_size < size_in_bytes) {
2116 Free(node, *node_size);
2117 *node_size = 0;
2118 return NULL;
2119 }
2120 return node;
2121}
2122
2123
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002124void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
2125 node->set_next(top_);
2126 top_ = node;
2127 if (end_ == NULL) {
2128 end_ = node;
2129 }
2130 available_ += size_in_bytes;
2131}
2132
2133
2134void FreeListCategory::RepairFreeList(Heap* heap) {
2135 FreeListNode* n = top_;
2136 while (n != NULL) {
2137 Map** map_location = reinterpret_cast<Map**>(n->address());
2138 if (*map_location == NULL) {
2139 *map_location = heap->free_space_map();
2140 } else {
2141 ASSERT(*map_location == heap->free_space_map());
2142 }
2143 n = n->next();
2144 }
2145}
2146
2147
// A FreeList belongs to one paged space; it caches the owning space and its
// heap, and starts out with all size categories empty.
FreeList::FreeList(PagedSpace* owner)
    : owner_(owner), heap_(owner->heap()) {
  Reset();
}
2152
2153
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00002154intptr_t FreeList::Concatenate(FreeList* free_list) {
2155 intptr_t free_bytes = 0;
2156 free_bytes += small_list_.Concatenate(free_list->small_list());
2157 free_bytes += medium_list_.Concatenate(free_list->medium_list());
2158 free_bytes += large_list_.Concatenate(free_list->large_list());
2159 free_bytes += huge_list_.Concatenate(free_list->huge_list());
2160 return free_bytes;
2161}
2162
2163
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002164void FreeList::Reset() {
2165 small_list_.Reset();
2166 medium_list_.Reset();
2167 large_list_.Reset();
2168 huge_list_.Reset();
2169}
2170
2171
2172int FreeList::Free(Address start, int size_in_bytes) {
2173 if (size_in_bytes == 0) return 0;
2174
2175 FreeListNode* node = FreeListNode::FromAddress(start);
2176 node->set_size(heap_, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002177 Page* page = Page::FromAddress(start);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002178
2179 // Early return to drop too-small blocks on the floor.
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002180 if (size_in_bytes < kSmallListMin) {
2181 page->add_non_available_small_blocks(size_in_bytes);
2182 return size_in_bytes;
2183 }
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002184
2185 // Insert other blocks at the head of a free list of the appropriate
2186 // magnitude.
2187 if (size_in_bytes <= kSmallListMax) {
2188 small_list_.Free(node, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002189 page->add_available_in_small_free_list(size_in_bytes);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002190 } else if (size_in_bytes <= kMediumListMax) {
2191 medium_list_.Free(node, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002192 page->add_available_in_medium_free_list(size_in_bytes);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002193 } else if (size_in_bytes <= kLargeListMax) {
2194 large_list_.Free(node, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002195 page->add_available_in_large_free_list(size_in_bytes);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002196 } else {
2197 huge_list_.Free(node, size_in_bytes);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002198 page->add_available_in_huge_free_list(size_in_bytes);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002199 }
2200
2201 ASSERT(IsVeryLong() || available() == SumFreeLists());
2202 return 0;
2203}
2204
2205
// Finds and unlinks a node of at least |size_in_bytes|, storing its actual
// size in |node_size|.  Strategy: first try the small/medium/large lists
// whose minimum node size already covers the request; then scan the huge
// list node by node; finally fall back to the exact size-class list with an
// explicit size check.  Page accounting is updated for every node removed
// (including evacuation-candidate nodes that are dropped while scanning).
// Returns NULL if nothing suitable is found.
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  FreeListNode* node = NULL;
  Page* page = NULL;

  if (size_in_bytes <= kSmallAllocationMax) {
    node = small_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
      ASSERT(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kMediumAllocationMax) {
    node = medium_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
      ASSERT(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kLargeAllocationMax) {
    node = large_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
      ASSERT(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  // Scan the huge list, unlinking evacuation-candidate nodes in place and
  // keeping a local running total of the list's available bytes.
  int huge_list_available = huge_list_.available();
  for (FreeListNode** cur = huge_list_.GetTopAddress();
       *cur != NULL;
       cur = (*cur)->next_address()) {
    FreeListNode* cur_node = *cur;
    while (cur_node != NULL &&
           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
      huge_list_available -= size;
      page = Page::FromAddress(cur_node->address());
      page->add_available_in_huge_free_list(-size);
      cur_node = cur_node->next();
    }

    // Splice out any skipped evacuation-candidate nodes.
    *cur = cur_node;
    if (cur_node == NULL) {
      huge_list_.set_end(NULL);
      break;
    }

    ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
    int size = cur_as_free_space->Size();
    if (size >= size_in_bytes) {
      // Large enough node found.  Unlink it from the list.
      node = *cur;
      *cur = node->next();
      *node_size = size;
      huge_list_available -= size;
      page = Page::FromAddress(node->address());
      page->add_available_in_huge_free_list(-size);
      break;
    }
  }

  if (huge_list_.top() == NULL) {
    huge_list_.set_end(NULL);
  }
  // Publish the locally-tracked total back to the list.
  huge_list_.set_available(huge_list_available);

  if (node != NULL) {
    ASSERT(IsVeryLong() || available() == SumFreeLists());
    return node;
  }

  // Last resort: search the request's own size class with an explicit size
  // check (a node there may still be smaller than the request).
  if (size_in_bytes <= kSmallListMax) {
    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kMediumListMax) {
    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kLargeListMax) {
    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
    }
  }

  ASSERT(IsVeryLong() || available() == SumFreeLists());
  return node;
}
2314
2315
// Allocation on the old space free list.  If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space.  If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
HeapObject* FreeList::Allocate(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));
  // Don't free list allocate if there is linear space available.
  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  owner_->Free(owner_->top(), old_linear_size);

  // Charge incremental marking for the net growth of allocated memory.
  owner_->heap()->incremental_marking()->OldSpaceStep(
      size_in_bytes - old_linear_size);

  int new_node_size = 0;
  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  if (new_node == NULL) {
    // No suitable node: leave the space without a linear allocation area.
    owner_->SetTop(NULL, NULL);
    return NULL;
  }

  int bytes_left = new_node_size - size_in_bytes;
  ASSERT(bytes_left >= 0);

#ifdef DEBUG
  // Zap the allocated words so stale free-list contents are detectable.
  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<Object**>(new_node->address())[i] =
        Smi::FromInt(kCodeZapValue);
  }
#endif

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  const int kThreshold = IncrementalMarking::kAllocatedThreshold;

  // Memory in the linear allocation area is counted as allocated.  We may free
  // a little of this again immediately - see below.
  owner_->Allocate(new_node_size);

  if (bytes_left > kThreshold &&
      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
      FLAG_incremental_marking_steps) {
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    // We don't want to give too large linear areas to the allocator while
    // incremental marking is going on, because we won't check again whether
    // we want to do another increment until the linear area is used up.
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    // Normally we give the rest of the node to the allocator as its new
    // linear allocation area.
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + new_node_size);
  } else {
    // TODO(gc) Try not freeing linear allocation region when bytes_left
    // are zero.
    owner_->SetTop(NULL, NULL);
  }

  return new_node;
}
2388
2389
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002390intptr_t FreeList::EvictFreeListItems(Page* p) {
2391 intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002392 p->set_available_in_huge_free_list(0);
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002393
2394 if (sum < p->area_size()) {
2395 sum += small_list_.EvictFreeListItemsInList(p) +
2396 medium_list_.EvictFreeListItemsInList(p) +
2397 large_list_.EvictFreeListItemsInList(p);
mstarzinger@chromium.orge27d6172013-04-17 11:51:44 +00002398 p->set_available_in_small_free_list(0);
2399 p->set_available_in_medium_free_list(0);
2400 p->set_available_in_large_free_list(0);
danno@chromium.org2c456792011-11-11 12:00:53 +00002401 }
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002402
danno@chromium.org2c456792011-11-11 12:00:53 +00002403 return sum;
2404}
2405
2406
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002407void FreeList::RepairLists(Heap* heap) {
2408 small_list_.RepairFreeList(heap);
2409 medium_list_.RepairFreeList(heap);
2410 large_list_.RepairFreeList(heap);
2411 huge_list_.RepairFreeList(heap);
danno@chromium.org2c456792011-11-11 12:00:53 +00002412}
2413
2414
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002415#ifdef DEBUG
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002416intptr_t FreeListCategory::SumFreeList() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002417 intptr_t sum = 0;
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002418 FreeListNode* cur = top_;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002419 while (cur != NULL) {
hpayer@chromium.org8432c912013-02-28 15:55:26 +00002420 ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002421 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2422 sum += cur_as_free_space->Size();
2423 cur = cur->next();
2424 }
2425 return sum;
2426}
2427
2428
// Lists of at least this length are considered "very long"; the debug
// length/sum walks stop here so the checks stay cheap.
static const int kVeryLongFreeList = 500;
2430
2431
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002432int FreeListCategory::FreeListLength() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002433 int length = 0;
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002434 FreeListNode* cur = top_;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002435 while (cur != NULL) {
2436 length++;
2437 cur = cur->next();
2438 if (length == kVeryLongFreeList) return length;
2439 }
2440 return length;
2441}
2442
2443
2444bool FreeList::IsVeryLong() {
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002445 if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2446 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2447 if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2448 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002449 return false;
2450}
2451
2452
2453// This can take a very long time because it is linear in the number of entries
2454// on the free list, so it should not be called if FreeListLength returns
2455// kVeryLongFreeList.
2456intptr_t FreeList::SumFreeLists() {
yangguo@chromium.orga6bbcc82012-12-21 12:35:02 +00002457 intptr_t sum = small_list_.SumFreeList();
2458 sum += medium_list_.SumFreeList();
2459 sum += large_list_.SumFreeList();
2460 sum += huge_list_.SumFreeList();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002461 return sum;
2462}
2463#endif
2464
2465
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002466// -----------------------------------------------------------------------------
2467// OldSpace implementation
2468
// Ensures at least |bytes| of contiguous linear allocation space in new
// space by actually allocating |bytes| and then rewinding top when the
// allocation turned out contiguous; retries otherwise.
bool NewSpace::ReserveSpace(int bytes) {
  // We can't reliably unpack a partial snapshot that needs more new space
  // space than the minimum NewSpace size.  The limit can be set lower than
  // the end of new space either because there is more space on the next page
  // or because we have lowered the limit in order to get periodic incremental
  // marking.  The most reliable way to ensure that there is linear space is
  // to do the allocation, then rewind the limit.
  ASSERT(bytes <= InitialCapacity());
  MaybeObject* maybe = AllocateRaw(bytes);
  Object* object = NULL;
  if (!maybe->ToObject(&object)) return false;  // Allocation failed.
  HeapObject* allocation = HeapObject::cast(object);
  Address top = allocation_info_.top;
  if ((top - bytes) == allocation->address()) {
    // The allocation was contiguous with the current top: undo it so the
    // space is merely reserved, not occupied.
    allocation_info_.top = allocation->address();
    return true;
  }
  // There may be a borderline case here where the allocation succeeded, but
  // the limit and top have moved on to a new page.  In that case we try again.
  return ReserveSpace(bytes);
}
2490
2491
// Called before a mark-compact collection: gives up the linear allocation
// area, abandons lazy sweeping (clearing the mark bits of still-unswept
// pages), and resets the free list, which will be rebuilt while sweeping.
void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping.  It will be
  // restored on the first allocation after the sweep.
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  int old_linear_size = static_cast<int>(limit() - top());
  Free(top(), old_linear_size);
  SetTop(NULL, NULL);

  // Stop lazy sweeping and clear marking bits for unswept pages.
  if (first_unswept_page_ != NULL) {
    Page* p = first_unswept_page_;
    do {
      // Do not use ShouldBeSweptLazily predicate here.
      // New evacuation candidates were selected but they still have
      // to be swept before collection starts.
      if (!p->WasSwept()) {
        Bitmap::Clear(p);
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
                 reinterpret_cast<intptr_t>(p));
        }
      }
      p = p->next_page();
    } while (p != anchor());
  }
  first_unswept_page_ = Page::FromAddress(NULL);
  unswept_free_bytes_ = 0;

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}
2524
2525
// Ensures the linear allocation area can hold |size_in_bytes| more bytes,
// pulling a fresh area from the free list (or the slow allocation path)
// when the current one is too small.  Returns false if no memory could be
// found.
bool PagedSpace::ReserveSpace(int size_in_bytes) {
  ASSERT(size_in_bytes <= AreaSize());
  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
  Address current_top = allocation_info_.top;
  Address new_top = current_top + size_in_bytes;
  // Fast path: the current linear area is already big enough.
  if (new_top <= allocation_info_.limit) return true;

  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
  if (new_area == NULL) return false;

  int old_linear_size = static_cast<int>(limit() - top());
  // Mark the old linear allocation area with a free space so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  Free(top(), old_linear_size);

  SetTop(new_area->address(), new_area->address() + size_in_bytes);
  return true;
}
2546
2547
ulan@chromium.org750145a2013-03-07 15:14:13 +00002548intptr_t PagedSpace::SizeOfObjects() {
2549 ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
2550 return Size() - unswept_free_bytes_ - (limit() - top());
2551}
2552
2553
// After we have booted, we have created a map which represents free space
// on the heap.  If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterBoot() {
  // Delegates to every FreeListCategory to patch the nodes' map words.
  free_list_.RepairLists(heap());
}
2561
2562
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002563// You have to call this last, since the implementation from PagedSpace
2564// doesn't know that memory was 'promised' to large object space.
2565bool LargeObjectSpace::ReserveSpace(int bytes) {
erik.corry@gmail.combbceb572012-03-09 10:52:05 +00002566 return heap()->OldGenerationCapacityAvailable() >= bytes &&
2567 (!heap()->incremental_marking()->IsStopped() ||
2568 heap()->OldGenerationSpaceAvailable() >= bytes);
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002569}
2570
2571
// Lazily sweeps pages until at least |bytes_to_sweep| bytes have been freed
// or no unswept pages remain.  Returns true when lazy sweeping of this space
// is complete.
bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
  if (IsLazySweepingComplete()) return true;

  intptr_t freed_bytes = 0;
  Page* p = first_unswept_page_;
  do {
    // Capture the successor first: sweeping may alter the page.
    Page* next_page = p->next_page();
    if (ShouldBeSweptLazily(p)) {
      if (FLAG_gc_verbose) {
        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
               reinterpret_cast<intptr_t>(p));
      }
      DecreaseUnsweptFreeBytes(p);
      freed_bytes +=
          MarkCompactCollector::
              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
                  this, NULL, p);
    }
    p = next_page;
  } while (p != anchor() && freed_bytes < bytes_to_sweep);

  // Remember where to resume, or record completion when the whole ring of
  // pages has been visited.
  if (p == anchor()) {
    first_unswept_page_ = Page::FromAddress(NULL);
  } else {
    first_unswept_page_ = p;
  }

  heap()->FreeQueuedChunks();

  return IsLazySweepingComplete();
}
2603
2604
2605void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2606 if (allocation_info_.top >= allocation_info_.limit) return;
2607
ricow@chromium.org27bf2882011-11-17 08:34:43 +00002608 if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002609 // Create filler object to keep page iterable if it was iterable.
2610 int remaining =
2611 static_cast<int>(allocation_info_.limit - allocation_info_.top);
2612 heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2613
2614 allocation_info_.top = NULL;
2615 allocation_info_.limit = NULL;
2616 }
2617}
2618
2619
// Tries to make at least |size_in_bytes| of swept memory available, either
// via sweeper threads (stealing their swept memory or waiting for them) or
// by advancing the lazy sweeper on this thread.
// NOTE(review): when concurrent sweeping is in progress and enough memory
// was stolen, this still returns false -- callers retry the free list
// allocation regardless of the return value, so this appears intentional;
// confirm against callers before relying on the return value.
bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->AreSweeperThreadsActivated()) {
    if (collector->IsConcurrentSweepingInProgress()) {
      if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
        // Not enough swept memory could be stolen; unless sweeping must stay
        // sequential, block until the sweeper threads are done.
        if (!collector->sequential_sweeping()) {
          collector->WaitUntilSweepingCompleted();
          return true;
        }
      }
      return false;
    }
    return true;
  } else {
    // No sweeper threads: do the work on this thread.
    return AdvanceSweeper(size_in_bytes);
  }
}
2637
2638
// Slow allocation path, entered when the free list could not satisfy the
// request.  Tries, in order: a bounded amount of sweeping, expanding the
// space with a new page, and finally sweeping everything -- retrying the
// free list after each step.  Returns NULL when the caller should GC.
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  // Allocation in this space has failed.

  // If there are unswept pages advance lazy sweeper a bounded number of times
  // until we find a size_in_bytes contiguous piece of memory
  const int kMaxSweepingTries = 5;
  bool sweeping_complete = false;

  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
    sweeping_complete = EnsureSweeperProgress(size_in_bytes);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }

  // Free list allocation failed and there is no next page.  Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  if (Expand()) {
    return free_list_.Allocate(size_in_bytes);
  }

  // Last ditch, sweep all the remaining pages to try to find space.  This may
  // cause a pause.
  if (!IsLazySweepingComplete()) {
    EnsureSweeperProgress(kMaxInt);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }

  // Finally, fail.
  return NULL;
}
2681
2682
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002683#ifdef DEBUG
void PagedSpace::ReportCodeStatistics() {
  // Dumps the per-code-kind size table and the per-comment size/count table
  // (filled in by CollectCodeStatistics) to stdout.  Debug builds only.
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics(isolate->code_kind_statistics());
  PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
         "count  (average)\"):\n");
  // Note <=: the slot at index kMaxComments is the overflow bucket that
  // ResetCodeStatistics labels "Unknown".
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      // EnterComment bumps size and count together, so count > 0 whenever
      // size > 0 and the average below cannot divide by zero.
      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}
2700
2701
2702void PagedSpace::ResetCodeStatistics() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002703 Isolate* isolate = Isolate::Current();
2704 CommentStatistic* comments_statistics =
2705 isolate->paged_space_comments_statistics();
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002706 ClearCodeKindStatistics(isolate->code_kind_statistics());
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002707 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2708 comments_statistics[i].Clear();
2709 }
2710 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2711 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2712 comments_statistics[CommentStatistic::kMaxComments].count = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002713}
2714
2715
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002716// Adds comment to 'comment_statistics' table. Performance OK as long as
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002717// 'kMaxComments' is small
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002718static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2719 CommentStatistic* comments_statistics =
2720 isolate->paged_space_comments_statistics();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002721 // Do not count empty comments
2722 if (delta <= 0) return;
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002723 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002724 // Search for a free or matching entry in 'comments_statistics': 'cs'
2725 // points to result.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002726 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002727 if (comments_statistics[i].comment == NULL) {
2728 cs = &comments_statistics[i];
2729 cs->comment = comment;
2730 break;
2731 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2732 cs = &comments_statistics[i];
2733 break;
2734 }
2735 }
2736 // Update entry for 'comment'
2737 cs->size += delta;
2738 cs->count += 1;
2739}
2740
2741
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'.  RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip
    return;
  }

  // Search for end of nested comment or a new nested comment
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  // Code bytes attributed directly to this comment (excluding bytes that
  // belong to deeper nested comments, which are counted via recursion).
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // from loop.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      // Attribute the code emitted since the previous marker to this
      // comment before deciding whether the marker closes or nests.
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment
      // A new comment: recurse so the inner region is charged to the
      // inner comment, not to this one.
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered with previous comment
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}
2777
2778
// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      // Per-kind accounting: the whole Code object's size.
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      // Instruction bytes not covered by any comment region.
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          // Consumes the whole (possibly nested) comment region and
          // leaves 'it' positioned at its closing marker.
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      // Everything after the last comment counts as uncommented code.
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}
2808
2809
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002810void PagedSpace::ReportStatistics() {
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002811 int pct = static_cast<int>(Available() * 100 / Capacity());
2812 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2813 ", waste: %" V8_PTR_PREFIX "d"
2814 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002815 Capacity(), Waste(), Available(), pct);
2816
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002817 if (was_swept_conservatively_) return;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002818 ClearHistograms();
2819 HeapObjectIterator obj_it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002820 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002821 CollectHistogramInfo(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002822 ReportHistogram(true);
2823}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002824#endif
2825
2826// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002827// FixedSpace implementation
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002828
void FixedSpace::PrepareForMarkCompact() {
  // Call prepare of the super class.
  PagedSpace::PrepareForMarkCompact();

  // During a non-compacting collection, everything below the linear
  // allocation pointer except wasted top-of-page blocks is considered
  // allocated and we will rediscover available bytes during the
  // collection.
  accounting_stats_.AllocateBytes(free_list_.available());

  // Clear the free list before a full GC---it will be rebuilt afterward.
  // NOTE: the available bytes must be folded into the accounting stats
  // (above) before the reset, or they would be lost.
  free_list_.Reset();
}
2842
2843
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002844// -----------------------------------------------------------------------------
2845// MapSpace implementation
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00002846// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2847// there is at least one non-inlined virtual function. I would prefer to hide
2848// the VerifyObject definition behind VERIFY_HEAP.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002849
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002850void MapSpace::VerifyObject(HeapObject* object) {
2851 // The object should be a map or a free-list node.
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00002852 CHECK(object->IsMap() || object->IsFreeSpace());
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002853}
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002854
2855
2856// -----------------------------------------------------------------------------
danno@chromium.org41728482013-06-12 22:31:22 +00002857// CellSpace and PropertyCellSpace implementation
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00002858// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2859// there is at least one non-inlined virtual function. I would prefer to hide
2860// the VerifyObject definition behind VERIFY_HEAP.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002861
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002862void CellSpace::VerifyObject(HeapObject* object) {
2863 // The object should be a global object property cell or a free-list node.
danno@chromium.org41728482013-06-12 22:31:22 +00002864 CHECK(object->IsCell() ||
2865 object->map() == heap()->two_pointer_filler_map());
2866}
2867
2868
2869void PropertyCellSpace::VerifyObject(HeapObject* object) {
2870 // The object should be a global object property cell or a free-list node.
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002871 CHECK(object->IsPropertyCell() ||
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002872 object->map() == heap()->two_pointer_filler_map());
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002873}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002874
2875
2876// -----------------------------------------------------------------------------
2877// LargeObjectIterator
2878
2879LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002880 current_ = space->first_page_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002881 size_func_ = NULL;
2882}
2883
2884
2885LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2886 HeapObjectCallback size_func) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002887 current_ = space->first_page_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002888 size_func_ = size_func;
2889}
2890
2891
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002892HeapObject* LargeObjectIterator::Next() {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002893 if (current_ == NULL) return NULL;
2894
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002895 HeapObject* object = current_->GetObject();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002896 current_ = current_->next_page();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002897 return object;
2898}
2899
2900
2901// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002902// LargeObjectSpace
// HashMap match predicate: two keys match only when they are the
// identical pointer value.
static bool ComparePointers(void* key1, void* key2) {
  if (key1 == key2) return true;
  return false;
}
2906
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002907
// Constructs an empty large object space with the given byte capacity cap.
// Executability is managed per allocation, so the space itself is tagged
// NOT_EXECUTABLE.
LargeObjectSpace::LargeObjectSpace(Heap* heap,
                                   intptr_t max_capacity,
                                   AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      max_capacity_(max_capacity),
      first_page_(NULL),   // Head of the singly-linked list of large pages.
      size_(0),            // Sum of page->size() over all pages.
      page_count_(0),
      objects_size_(0),    // Sum of the allocated objects' sizes.
      chunk_map_(ComparePointers, 1024) {}  // Aligned-chunk -> LargePage map.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002918
2919
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00002920bool LargeObjectSpace::SetUp() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002921 first_page_ = NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002922 size_ = 0;
2923 page_count_ = 0;
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002924 objects_size_ = 0;
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002925 chunk_map_.Clear();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002926 return true;
2927}
2928
2929
void LargeObjectSpace::TearDown() {
  // Releases every large page back to the memory allocator, logging the
  // deletion and notifying registered allocation callbacks for each page,
  // then resets the space to its empty state.
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  // Clears size_, page_count_, objects_size_ and the chunk map.
  SetUp();
}
2943
2944
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002945MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2946 Executability executable) {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002947 // Check if we want to force a GC before growing the old space further.
2948 // If so, fail the allocation.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002949 if (!heap()->always_allocate() &&
2950 heap()->OldGenerationAllocationLimitReached()) {
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00002951 return Failure::RetryAfterGC(identity());
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002952 }
2953
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002954 if (Size() + object_size > max_capacity_) {
2955 return Failure::RetryAfterGC(identity());
2956 }
2957
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002958 LargePage* page = heap()->isolate()->memory_allocator()->
fschneider@chromium.org7d10be52012-04-10 12:30:14 +00002959 AllocateLargePage(object_size, this, executable);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002960 if (page == NULL) return Failure::RetryAfterGC(identity());
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00002961 ASSERT(page->area_size() >= object_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002962
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002963 size_ += static_cast<int>(page->size());
2964 objects_size_ += object_size;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002965 page_count_++;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002966 page->set_next_page(first_page_);
2967 first_page_ = page;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002968
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002969 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2970 // this large page in the chunk map.
2971 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2972 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2973 for (uintptr_t key = base; key <= limit; key++) {
2974 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2975 static_cast<uint32_t>(key),
2976 true);
2977 ASSERT(entry != NULL);
2978 entry->value = page;
2979 }
2980
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00002981 HeapObject* object = page->GetObject();
2982
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00002983 if (Heap::ShouldZapGarbage()) {
2984 // Make the object consistent so the heap can be verified in OldSpaceStep.
2985 // We only need to do this in debug builds or if verify_heap is on.
2986 reinterpret_cast<Object**>(object->address())[0] =
2987 heap()->fixed_array_map();
2988 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2989 }
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00002990
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002991 heap()->incremental_marking()->OldSpaceStep(object_size);
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00002992 return object;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002993}
2994
2995
danno@chromium.org72204d52012-10-31 10:02:10 +00002996size_t LargeObjectSpace::CommittedPhysicalMemory() {
2997 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
2998 size_t size = 0;
2999 LargePage* current = first_page_;
3000 while (current != NULL) {
3001 size += current->CommittedPhysicalMemory();
3002 current = current->next_page();
3003 }
3004 return size;
3005}
3006
3007
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003008// GC support
lrn@chromium.org303ada72010-10-27 09:33:13 +00003009MaybeObject* LargeObjectSpace::FindObject(Address a) {
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00003010 LargePage* page = FindPage(a);
3011 if (page != NULL) {
3012 return page->GetObject();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003013 }
3014 return Failure::Exception();
3015}
3016
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00003017
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00003018LargePage* LargeObjectSpace::FindPage(Address a) {
3019 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
3020 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
3021 static_cast<uint32_t>(key),
3022 false);
3023 if (e != NULL) {
3024 ASSERT(e->value != NULL);
3025 LargePage* page = reinterpret_cast<LargePage*>(e->value);
3026 ASSERT(page->is_valid());
3027 if (page->Contains(a)) {
3028 return page;
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00003029 }
3030 }
3031 return NULL;
3032}
3033
3034
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003035void LargeObjectSpace::FreeUnmarkedObjects() {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003036 LargePage* previous = NULL;
3037 LargePage* current = first_page_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003038 while (current != NULL) {
3039 HeapObject* object = current->GetObject();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003040 // Can this large page contain pointers to non-trivial objects. No other
3041 // pointer object is this big.
3042 bool is_pointer_object = object->IsFixedArray();
3043 MarkBit mark_bit = Marking::MarkBitFrom(object);
3044 if (mark_bit.Get()) {
3045 mark_bit.Clear();
yangguo@chromium.orgfb377212012-11-16 14:43:43 +00003046 Page::FromAddress(object->address())->ResetProgressBar();
3047 Page::FromAddress(object->address())->ResetLiveBytes();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003048 previous = current;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003049 current = current->next_page();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003050 } else {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003051 LargePage* page = current;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003052 // Cut the chunk out from the chunk list.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003053 current = current->next_page();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003054 if (previous == NULL) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003055 first_page_ = current;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003056 } else {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003057 previous->set_next_page(current);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003058 }
3059
3060 // Free the chunk.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00003061 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
3062 object, heap()->isolate());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003063 size_ -= static_cast<int>(page->size());
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00003064 objects_size_ -= object->Size();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003065 page_count_--;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003066
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00003067 // Remove entries belonging to this page.
3068 // Use variable alignment to help pass length check (<= 80 characters)
3069 // of single line in tools/presubmit.py.
3070 const intptr_t alignment = MemoryChunk::kAlignment;
3071 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
3072 uintptr_t limit = base + (page->size()-1)/alignment;
3073 for (uintptr_t key = base; key <= limit; key++) {
3074 chunk_map_.Remove(reinterpret_cast<void*>(key),
3075 static_cast<uint32_t>(key));
3076 }
3077
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003078 if (is_pointer_object) {
3079 heap()->QueueMemoryChunkForFree(page);
3080 } else {
3081 heap()->isolate()->memory_allocator()->Free(page);
3082 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003083 }
3084 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003085 heap()->FreeQueuedChunks();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003086}
3087
3088
3089bool LargeObjectSpace::Contains(HeapObject* object) {
3090 Address address = object->address();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003091 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003092
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003093 bool owned = (chunk->owner() == this);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003094
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003095 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
3096
3097 return owned;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003098}
3099
3100
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003101#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  // Walk the raw page list directly instead of using LargeObjectIterator.
  for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // Every heap pointer stored in the array must point at a live object
      // in this heap whose first word is a valid map.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003149#endif
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003150
3151
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00003152#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003153void LargeObjectSpace::Print() {
3154 LargeObjectIterator it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003155 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003156 obj->Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003157 }
3158}
3159
3160
void LargeObjectSpace::ReportStatistics() {
  // Prints the total committed size, the object count and cumulative
  // object size, and a per-type histogram.  Debug builds only.
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}
3175
3176
3177void LargeObjectSpace::CollectCodeStatistics() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00003178 Isolate* isolate = heap()->isolate();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003179 LargeObjectIterator obj_it(this);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003180 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003181 if (obj->IsCode()) {
3182 Code* code = Code::cast(obj);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00003183 isolate->code_kind_statistics()[code->kind()] += code->Size();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003184 }
3185 }
3186}
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003187
3188
void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  // Iterate with the GC-safe size function so object sizes can be computed
  // even while a collection is in progress.
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;  // Total bytes of marked (live) objects seen.
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  // Compare our recomputed mark total against the page's live-bytes
  // counter (both printed in hex).
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}
3211
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003212#endif // DEBUG
3213
3214} } // namespace v8::internal