blob: a782ba98cafc8fc48186f7e1b4657c264bec2d09 [file] [log] [blame]
ager@chromium.orgea91cc52011-05-23 06:06:11 +00001// Copyright 2011 the V8 project authors. All rights reserved.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
ager@chromium.org0ee099b2011-01-25 14:06:47 +000030#include "liveobjectlist-inl.h"
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000031#include "macro-assembler.h"
32#include "mark-compact.h"
33#include "platform.h"
34
kasperl@chromium.org71affb52009-05-26 05:44:31 +000035namespace v8 {
36namespace internal {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000037
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
// Debug-only sanity check for a semispace's AllocationInfo: top must lie
// within [low, high] and limit must equal high.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000044
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000045// ----------------------------------------------------------------------------
46// HeapObjectIterator
47
// Iterate every object in |space|, from its bottom to the allocation top.
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


// As above, but use |size_func| to compute object sizes instead of the
// default (NULL means the iterator's default sizing is used).
HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


// Iterate objects in |space| starting from |start| (not from the bottom).
HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


// As above, with a custom size function.
HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


// Iterate only the objects on a single |page|.
HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
}
74
75
// Shared constructor body: set up iteration over [cur, end).  |size_f| may
// be NULL.
void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  // On the final page iteration stops at end_addr_; on earlier pages it
  // runs to that page's allocation top.
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}
89
90
// Advance to the first object on the next page and return it, or return
// NULL when iteration is complete.
HeapObject* HeapObjectIterator::FromNextPage() {
  if (cur_addr_ == end_addr_) return NULL;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  // The end address can coincide with the start of this page (an empty
  // tail), in which case there is nothing more to return.
  if (cur_addr_ == end_addr_) return NULL;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return FromCurrentPage();
}
108
109
#ifdef DEBUG
// Check the iterator invariant: the current address and its limit lie on
// the same page and are correctly ordered.
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif
117
118
119// -----------------------------------------------------------------------------
120// PageIterator
121
// Iterate |space|'s pages; |mode| selects the page at which iteration stops
// (allocation top, mark-compact relocation top, or the very last page).
PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}
145
146
147// -----------------------------------------------------------------------------
sgjesse@chromium.orgc5145742009-10-07 09:00:33 +0000148// CodeRange
149
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000150
// Construct an empty code range; the reservation itself happens in Setup().
CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {
}
sgjesse@chromium.orgc5145742009-10-07 09:00:33 +0000158
159
160bool CodeRange::Setup(const size_t requested) {
161 ASSERT(code_range_ == NULL);
162
163 code_range_ = new VirtualMemory(requested);
164 CHECK(code_range_ != NULL);
165 if (!code_range_->IsReserved()) {
166 delete code_range_;
167 code_range_ = NULL;
168 return false;
169 }
170
171 // We are sure that we have mapped a block of requested addresses.
172 ASSERT(code_range_->size() == requested);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000173 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
sgjesse@chromium.orgc5145742009-10-07 09:00:33 +0000174 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
175 current_allocation_block_index_ = 0;
176 return true;
177}
178
179
// Comparator for sorting free blocks by start address (used by List::Sort).
int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}
187
188
// Advance current_allocation_block_index_ to a block of at least |requested|
// bytes.  If none is left, merge the free list back into the allocation
// list (coalescing adjacent blocks) and retry; if that still fails, abort
// with out-of-memory.
void CodeRange::GetNextAllocationBlock(size_t requested) {
  // First pass: try the remaining blocks on the allocation list.
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  // Second pass: scan the coalesced allocation list from the beginning.
  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}
228
229
230
// Commit |requested| bytes (rounded up to whole pages) from the front of the
// current allocation block.  On success returns the start address and stores
// the actual committed size in *allocated; on commit failure returns NULL
// with *allocated == 0.
void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  // NOTE(review): current.size - Page::kPageSize is unsigned arithmetic and
  // would wrap if a block were ever smaller than one page — presumably
  // blocks are always page-sized multiples; confirm.
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  // Shrink the current block from the front by the committed amount.
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}
257
258
// Return [address, address+length) to the free list and uncommit it; the
// block is merged back into the allocation list lazily by
// GetNextAllocationBlock.
void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}
263
264
// Release the entire reserved range and the bookkeeping lists.
void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}
271
272
273// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000274// MemoryAllocator
275//
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000276
// Initial capacity hint for the chunk bookkeeping lists.  270 is an estimate
// based on the static default heap size of a pair of 256K semispaces and a
// 64M old generation.
const int kEstimatedNumberOfChunks = 270;
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000280
281
// Construct an allocator with no capacity; Setup() establishes the limits.
MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      initial_chunk_(NULL),
      chunks_(kEstimatedNumberOfChunks),
      free_chunk_ids_(kEstimatedNumberOfChunks),
      max_nof_chunks_(0),
      top_(0) {
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000294
295
// Push a recycled chunk id onto the free-id stack.
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}
301
302
// Pop a free chunk id off the stack; the stack must be non-empty.
int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}
307
308
// Establish the total and executable capacity limits (both rounded up to
// page size) and pre-populate the chunk id free list.  Returns false if the
// implied number of chunks exceeds kMaxNofChunks.
bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ =
      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  size_executable_ = 0;
  ChunkInfo info;  // uninitialized element.
  // Ids are pushed in descending order so that Pop() hands them out
  // starting from 0.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}
337
338
// Delete every live chunk, release the initial chunk reservation, and reset
// all counters so the allocator can be set up again.
void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  capacity_executable_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}
359
360
// Allocate |requested| bytes from the OS (or the code range, for executable
// memory).  Stores the actual size in *allocated and updates the size
// counters; returns NULL if a capacity limit would be exceeded.
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
    return NULL;
  }

  void* mem;
  if (executable == EXECUTABLE) {
    // Check executable memory limit.
    if (size_executable_ + requested >
        static_cast<size_t>(capacity_executable_)) {
      LOG(isolate_,
          StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }
    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range()->exists()) {
      mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
    } else {
      mem = OS::Allocate(requested, allocated, true);
    }
    // Update executable memory size.
    size_executable_ += static_cast<int>(*allocated);
  } else {
    mem = OS::Allocate(requested, allocated, false);
  }
  // NOTE(review): if the underlying allocation failed, mem is NULL here and
  // the counters are presumably updated with *allocated == 0 — confirm that
  // the allocation functions zero *allocated on failure.
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;

#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  isolate_->counters()->memory_allocated()->Increment(alloced);
  return mem;
}
399
400
// Release memory obtained from AllocateRawMemory, routing it back to the
// code range when it came from there, and update the size counters.
void MemoryAllocator::FreeRawMemory(void* mem,
                                    size_t length,
                                    Executability executable) {
#ifdef DEBUG
  // Do not try to zap the guard page.
  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
  ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
#endif
  if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
    isolate_->code_range()->FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);

  ASSERT(size_ >= 0);
  ASSERT(size_executable_ >= 0);
}
421
422
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +0000423void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
424 AllocationAction action,
425 size_t size) {
426 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
427 MemoryAllocationCallbackRegistration registration =
428 memory_allocation_callbacks_[i];
429 if ((registration.space & space) == space &&
430 (registration.action & action) == action)
431 registration.callback(space, action, static_cast<int>(size));
432 }
433}
434
435
436bool MemoryAllocator::MemoryAllocationCallbackRegistered(
437 MemoryAllocationCallback callback) {
438 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
439 if (memory_allocation_callbacks_[i].callback == callback) return true;
440 }
441 return false;
442}
443
444
445void MemoryAllocator::AddMemoryAllocationCallback(
446 MemoryAllocationCallback callback,
447 ObjectSpace space,
448 AllocationAction action) {
449 ASSERT(callback != NULL);
450 MemoryAllocationCallbackRegistration registration(callback, space, action);
451 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
452 return memory_allocation_callbacks_.Add(registration);
453}
454
455
456void MemoryAllocator::RemoveMemoryAllocationCallback(
457 MemoryAllocationCallback callback) {
458 ASSERT(callback != NULL);
459 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
460 if (memory_allocation_callbacks_[i].callback == callback) {
461 memory_allocation_callbacks_.Remove(i);
462 return;
463 }
464 }
465 UNREACHABLE();
466}
467
// Reserve (without committing) the initial chunk of |requested| bytes that
// backs the young generation and other bootstrap spaces.  Returns its base
// address, or NULL if the reservation failed.
void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(isolate_,
      NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}
486
487
// Number of whole, page-aligned pages that fit inside [start, start+size).
static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}
496
497
// Allocate a fresh chunk of |requested_pages| pages for |owner|.  Stores the
// number of usable pages in *allocated_pages (one page may be consumed by a
// guard page for executable spaces) and returns the first page, or an
// invalid page on failure.
Page* MemoryAllocator::AllocatePages(int requested_pages,
                                     int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);

  // We may 'lose' a page due to alignment.
  ASSERT(*allocated_pages >= kPagesPerChunk - 1);

  // Executable chunks get a leading guard page.
  size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;

  // Check that we got at least one page that we can use.
  if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
    FreeRawMemory(chunk,
                  chunk_size,
                  owner->executable());
    LOG(isolate_, DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  if (guard_size != 0) {
    // Protect the guard page and shift the usable chunk past it.
    OS::Guard(chunk, guard_size);
    chunk_size -= guard_size;
    chunk = static_cast<Address>(chunk) + guard_size;
    --*allocated_pages;
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);

  return new_pages;
}
540
541
// Commit a region of the initial (reserved) chunk for |owner| and carve it
// into pages.  Stores the page count in *num_pages and returns the first
// page, or an invalid page if the commit failed.
Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}
565
566
// Commit a raw block inside the initial chunk (no page bookkeeping).
// Returns false if the underlying commit failed.
bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}
583
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +0000584
// Uncommit a raw block inside the initial chunk previously committed with
// CommitBlock.  Returns false if the underlying uncommit failed.
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000596
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +0000597
598void MemoryAllocator::ZapBlock(Address start, size_t size) {
599 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
600 Memory::Address_at(start + s) = kZapValue;
601 }
602}
603
604
// Initialize the Page headers inside a freshly allocated chunk: link each
// page to the next via the opaque header (whose low bits carry the chunk
// id) and reset the watermarks.  Returns the first page.
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->heap_ = owner->heap();
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->InvalidateWatermark(true);
    p->SetIsLargeObjectPage(false);
    p->SetAllocationWatermark(p->ObjectAreaStart());
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}
639
640
// Free every chunk from page 'p' to the end of its page list.  If 'p' is
// not the first page of its chunk, that chunk is kept (truncated after p's
// last page) and 'p' is returned; otherwise an invalid page is returned.
Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'prev'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}
671
672
fschneider@chromium.org013f3e12010-04-26 13:27:52 +0000673void MemoryAllocator::FreeAllPages(PagedSpace* space) {
674 for (int i = 0, length = chunks_.length(); i < length; i++) {
675 if (chunks_[i].owner() == space) {
676 DeleteChunk(i);
677 }
678 }
679}
680
681
// Release a chunk's memory (uncommitting it if it lives inside the initial
// chunk, freeing it otherwise), clear its bookkeeping entry, and recycle
// its id.
void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters* counters = isolate_->counters();
    counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
  } else {
    LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
    size_t size = c.size();
    // Executable chunks were allocated with a leading guard page; free the
    // original, un-shifted region.
    size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
    FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
    PerformAllocationCallback(space, kAllocationActionFree, size);
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}
707
708
709Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
710 int chunk_id = GetChunkId(p);
711 ASSERT(IsValidChunk(chunk_id));
712
713 Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
714 return Page::FromAddress(low);
715}
716
717
// Return the last (highest-addressed) whole page of the chunk containing
// |p|.
Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}
730
731
#ifdef DEBUG
// Prints capacity, used bytes and the percentage still available.
// Debug-only diagnostic output.
void MemoryAllocator::ReportStatistics() {
  // Fraction of the capacity that is still unallocated.
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  // "%%" prints a literal '%' before the availability figure.
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif
741
742
fschneider@chromium.org013f3e12010-04-26 13:27:52 +0000743void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
744 Page** first_page,
745 Page** last_page,
746 Page** last_page_in_use) {
747 Page* first = NULL;
748 Page* last = NULL;
749
750 for (int i = 0, length = chunks_.length(); i < length; i++) {
751 ChunkInfo& chunk = chunks_[i];
752
753 if (chunk.owner() == space) {
754 if (first == NULL) {
755 Address low = RoundUp(chunk.address(), Page::kPageSize);
756 first = Page::FromAddress(low);
757 }
758 last = RelinkPagesInChunk(i,
759 chunk.address(),
760 chunk.size(),
761 last,
762 last_page_in_use);
763 }
764 }
765
766 if (first_page != NULL) {
767 *first_page = first;
768 }
769
770 if (last_page != NULL) {
771 *last_page = last;
772 }
773}
774
775
// Relinks the pages of one chunk behind |prev| (if valid) and returns the
// chunk's last page.  Each page's opaque_header packs the next page's
// address together with the chunk id; the last page stores a zero address
// to terminate the list.  *last_page_in_use is advanced to the last page
// that was in use before mark-compact.
Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  // Attach this chunk's first page after the previous chunk's last page.
  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    // Link to the following page and tag with the owning chunk id.
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    p->InvalidateWatermark(true);
    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}
809
810
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000811// -----------------------------------------------------------------------------
812// PagedSpace implementation
813
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000814PagedSpace::PagedSpace(Heap* heap,
815 intptr_t max_capacity,
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000816 AllocationSpace id,
817 Executability executable)
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000818 : Space(heap, id, executable) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000819 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
820 * Page::kObjectAreaSize;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000821 accounting_stats_.Clear();
822
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000823 allocation_info_.top = NULL;
824 allocation_info_.limit = NULL;
825
826 mc_forwarding_info_.top = NULL;
827 mc_forwarding_info_.limit = NULL;
828}
829
830
// Sets up the space using the virtual memory range [start, start + size) if
// it can hold at least one page; otherwise fresh pages are allocated from
// the memory allocator.  Returns false if already set up or if the initial
// allocation fails.
bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us. If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
        RoundUp(start, Page::kPageSize),
        Page::kPageSize * pages_in_chunk,
        this, &num_pages);
  } else {
    // Request a chunk, but never more pages than the capacity limit allows.
    int requested_pages =
        Min(MemoryAllocator::kPagesPerChunk,
            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
    first_page_ =
        Isolate::Current()->memory_allocator()->AllocatePages(
            requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially clear region marks in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}
874
875
876bool PagedSpace::HasBeenSetup() {
877 return (Capacity() > 0);
878}
879
880
// Returns every page of this space to the memory allocator and resets the
// accounting so the space reads as empty/un-setup.
void PagedSpace::TearDown() {
  Isolate::Current()->memory_allocator()->FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}
886
887
ricow@chromium.org30ce4112010-05-31 10:38:25 +0000888void PagedSpace::MarkAllPagesClean() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000889 PageIterator it(this, PageIterator::ALL_PAGES);
890 while (it.has_next()) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +0000891 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000892 }
893}
894
895
lrn@chromium.org303ada72010-10-27 09:33:13 +0000896MaybeObject* PagedSpace::FindObject(Address addr) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000897 // Note: this function can only be called before or after mark-compact GC
898 // because it accesses map pointers.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000899 ASSERT(!heap()->mark_compact_collector()->in_use());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000900
901 if (!Contains(addr)) return Failure::Exception();
902
903 Page* p = Page::FromAddress(addr);
kasper.lund7276f142008-07-30 08:49:36 +0000904 ASSERT(IsUsed(p));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000905 Address cur = p->ObjectAreaStart();
906 Address end = p->AllocationTop();
907 while (cur < end) {
908 HeapObject* obj = HeapObject::FromAddress(cur);
909 Address next = cur + obj->Size();
910 if ((cur <= addr) && (addr < next)) return obj;
911 cur = next;
912 }
913
kasper.lund7276f142008-07-30 08:49:36 +0000914 UNREACHABLE();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000915 return Failure::Exception();
916}
917
918
kasper.lund7276f142008-07-30 08:49:36 +0000919bool PagedSpace::IsUsed(Page* page) {
920 PageIterator it(this, PageIterator::PAGES_IN_USE);
921 while (it.has_next()) {
922 if (page == it.next()) return true;
923 }
924 return false;
925}
926
927
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000928void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
929 alloc_info->top = p->ObjectAreaStart();
930 alloc_info->limit = p->ObjectAreaEnd();
kasper.lund7276f142008-07-30 08:49:36 +0000931 ASSERT(alloc_info->VerifyPagedAllocation());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000932}
933
934
// Prepares the space for mark-compact relocation: numbers every page and
// points the forwarding allocation info at the first page.
void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'. We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}
950
951
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000952int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
953#ifdef DEBUG
954 // The Contains function considers the address at the beginning of a
955 // page in the page, MCSpaceOffsetForAddress considers it is in the
956 // previous page.
957 if (Page::IsAlignedToPageSize(addr)) {
958 ASSERT(Contains(addr - kPointerSize));
959 } else {
960 ASSERT(Contains(addr));
961 }
962#endif
963
964 // If addr is at the end of a page, it belongs to previous page
965 Page* p = Page::IsAlignedToPageSize(addr)
966 ? Page::FromAllocationTop(addr)
967 : Page::FromAddress(addr);
968 int index = p->mc_page_index;
969 return (index * Page::kPageSize) + p->Offset(addr);
970}
971
972
kasper.lund7276f142008-07-30 08:49:36 +0000973// Slow case for reallocating and promoting objects during a compacting
974// collection. This function is not space-specific.
975HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
976 Page* current_page = TopPageOf(mc_forwarding_info_);
977 if (!current_page->next_page()->is_valid()) {
978 if (!Expand(current_page)) {
979 return NULL;
980 }
981 }
982
983 // There are surely more pages in the space now.
984 ASSERT(current_page->next_page()->is_valid());
985 // We do not add the top of page block for current page to the space's
986 // free list---the block may contain live objects so we cannot write
987 // bookkeeping information to it. Instead, we will recover top of page
988 // blocks when we move objects to their new locations.
989 //
990 // We do however write the allocation pointer to the page. The encoding
991 // of forwarding addresses is as an offset in terms of live bytes, so we
992 // need quick access to the allocation top of each page to decode
993 // forwarding addresses.
ricow@chromium.org30ce4112010-05-31 10:38:25 +0000994 current_page->SetAllocationWatermark(mc_forwarding_info_.top);
995 current_page->next_page()->InvalidateWatermark(true);
kasper.lund7276f142008-07-30 08:49:36 +0000996 SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
997 return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
998}
999
1000
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001001bool PagedSpace::Expand(Page* last_page) {
1002 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
1003 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
1004
1005 if (Capacity() == max_capacity_) return false;
1006
1007 ASSERT(Capacity() < max_capacity_);
1008 // Last page must be valid and its next page is invalid.
1009 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
1010
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001011 int available_pages =
1012 static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
kasperl@chromium.orga5551262010-12-07 12:49:48 +00001013 // We don't want to have to handle small chunks near the end so if there are
1014 // not kPagesPerChunk pages available without exceeding the max capacity then
1015 // act as if memory has run out.
1016 if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001017
1018 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001019 Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
1020 desired_pages, &desired_pages, this);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001021 if (!p->is_valid()) return false;
1022
1023 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
1024 ASSERT(Capacity() <= max_capacity_);
1025
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001026 heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001027
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001028 // Sequentially clear region marks of new pages and and cache the
kasperl@chromium.orgb3284ad2009-05-18 06:12:45 +00001029 // new last page in the space.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001030 while (p->is_valid()) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001031 p->SetRegionMarks(Page::kAllRegionsCleanMarks);
kasperl@chromium.orgb3284ad2009-05-18 06:12:45 +00001032 last_page_ = p;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001033 p = p->next_page();
1034 }
1035
1036 return true;
1037}
1038
1039
#ifdef DEBUG
// Walks the page list and returns the number of pages in this space.
int PagedSpace::CountTotalPages() {
  int total = 0;
  Page* page = first_page_;
  while (page->is_valid()) {
    total++;
    page = page->next_page();
  }
  return total;
}
#endif
1049
1050
1051void PagedSpace::Shrink() {
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001052 if (!page_list_is_chunk_ordered_) {
1053 // We can't shrink space if pages is not chunk-ordered
1054 // (see comment for class MemoryAllocator for definition).
1055 return;
1056 }
1057
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001058 // Release half of free pages.
1059 Page* top_page = AllocationTopPage();
1060 ASSERT(top_page->is_valid());
1061
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001062 // Count the number of pages we would like to free.
1063 int pages_to_free = 0;
1064 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1065 pages_to_free++;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001066 }
1067
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001068 // Free pages after top_page.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001069 Page* p = heap()->isolate()->memory_allocator()->
1070 FreePages(top_page->next_page());
1071 heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001072
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001073 // Find out how many pages we failed to free and update last_page_.
1074 // Please note pages can only be freed in whole chunks.
1075 last_page_ = top_page;
1076 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1077 pages_to_free--;
kasperl@chromium.orgb3284ad2009-05-18 06:12:45 +00001078 last_page_ = p;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001079 }
1080
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001081 accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001082 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
1083}
1084
1085
// Grows the space until Capacity() >= |capacity|.  Returns true on success,
// false if an expansion fails first.  Already-large-enough spaces return
// immediately.
bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = heap()->isolate()->memory_allocator()->
        FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
            last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}
1109
1110
#ifdef DEBUG
// Debug printing is intentionally a no-op for paged spaces.
void PagedSpace::Print() { }
#endif
1114
1115
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001116#ifdef DEBUG
1117// We do not assume that the PageIterator works, because it depends on the
1118// invariants we are checking during verification.
1119void PagedSpace::Verify(ObjectVisitor* visitor) {
1120 // The allocation pointer should be valid, and it should be in a page in the
1121 // space.
1122 ASSERT(allocation_info_.VerifyPagedAllocation());
1123 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001124 ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001125
1126 // Loop over all the pages.
1127 bool above_allocation_top = false;
1128 Page* current_page = first_page_;
1129 while (current_page->is_valid()) {
1130 if (above_allocation_top) {
1131 // We don't care what's above the allocation top.
1132 } else {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001133 Address top = current_page->AllocationTop();
1134 if (current_page == top_page) {
1135 ASSERT(top == allocation_info_.top);
1136 // The next page will be above the allocation top.
1137 above_allocation_top = true;
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001138 }
1139
1140 // It should be packed with objects from the bottom to the top.
1141 Address current = current_page->ObjectAreaStart();
1142 while (current < top) {
1143 HeapObject* object = HeapObject::FromAddress(current);
1144
1145 // The first word should be a map, and we expect all map pointers to
1146 // be in map space.
1147 Map* map = object->map();
1148 ASSERT(map->IsMap());
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001149 ASSERT(heap()->map_space()->Contains(map));
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001150
1151 // Perform space-specific object verification.
1152 VerifyObject(object);
1153
1154 // The object itself should look OK.
1155 object->Verify();
1156
1157 // All the interior pointers should be contained in the heap and
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001158 // have page regions covering intergenerational references should be
1159 // marked dirty.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001160 int size = object->Size();
christian.plesner.hansen@gmail.com2bc58ef2009-09-22 10:00:30 +00001161 object->IterateBody(map->instance_type(), size, visitor);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001162
1163 current += size;
1164 }
1165
1166 // The allocation pointer should not be in the middle of an object.
1167 ASSERT(current == top);
1168 }
1169
1170 current_page = current_page->next_page();
1171 }
1172}
1173#endif
1174
1175
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001176// -----------------------------------------------------------------------------
1177// NewSpace implementation
1178
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001179
// Sets up the new space over the preallocated block [start, start + size),
// splitting it into a to-space and a from-space of equal maximum capacity.
// Returns false if either semispace fails to set up.
bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
  int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  // Masks for fast containment checks; they rely on the power-of-two,
  // self-aligned layout asserted above.
  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  // Allocation proceeds linearly through the current to-space.
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}
1227
1228
// Releases the histogram arrays, clears all allocation state, and tears
// down both semispaces.  The underlying reservation is not freed here.
void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}
1248
1249
1250void NewSpace::Flip() {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001251 SemiSpace tmp = from_space_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001252 from_space_ = to_space_;
1253 to_space_ = tmp;
1254}
1255
1256
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001257void NewSpace::Grow() {
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001258 ASSERT(Capacity() < MaximumCapacity());
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001259 if (to_space_.Grow()) {
1260 // Only grow from space if we managed to grow to space.
1261 if (!from_space_.Grow()) {
1262 // If we managed to grow to space but couldn't grow from space,
1263 // attempt to shrink to space.
1264 if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1265 // We are in an inconsistent state because we could not
1266 // commit/uncommit memory from new space.
1267 V8::FatalProcessOutOfMemory("Failed to grow new space.");
1268 }
1269 }
1270 }
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001271 allocation_info_.limit = to_space_.high();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001272 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001273}
1274
1275
// Shrinks both semispaces to max(initial capacity, 2 * current size),
// rounded up to the OS allocation alignment.  If from-space cannot follow
// to-space, the shrink of to-space is rolled back; failure to roll back is
// fatal.
void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity))  {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
1296
1297
// Resets linear allocation to the start of the current to-space.
void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
1303
1304
// Points the mark-compact forwarding allocation info at from-space, which
// receives relocated objects during a compacting collection.
void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}
1310
1311
// Adopts the forwarding allocation top as the real allocation top.
void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
1319
1320
#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
// Verifies that to-space is densely packed with valid non-map, non-code
// objects from its low address up to the allocation top.
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(heap()->map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif
1359
1360
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001361bool SemiSpace::Commit() {
1362 ASSERT(!is_committed());
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001363 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1364 start_, capacity_, executable())) {
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001365 return false;
1366 }
1367 committed_ = true;
1368 return true;
1369}
1370
1371
1372bool SemiSpace::Uncommit() {
1373 ASSERT(is_committed());
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001374 if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1375 start_, capacity_)) {
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001376 return false;
1377 }
1378 committed_ = false;
1379 return true;
1380}
1381
1382
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001383// -----------------------------------------------------------------------------
1384// SemiSpace implementation
1385
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001386bool SemiSpace::Setup(Address start,
1387 int initial_capacity,
1388 int maximum_capacity) {
1389 // Creates a space in the young generation. The constructor does not
1390 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1391 // memory of size 'capacity' when set up, and does not grow or shrink
1392 // otherwise. In the mark-compact collector, the memory region of the from
1393 // space is used as the marking stack. It requires contiguous memory
1394 // addresses.
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001395 initial_capacity_ = initial_capacity;
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001396 capacity_ = initial_capacity;
1397 maximum_capacity_ = maximum_capacity;
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001398 committed_ = false;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001399
1400 start_ = start;
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001401 address_mask_ = ~(maximum_capacity - 1);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001402 object_mask_ = address_mask_ | kHeapObjectTagMask;
ager@chromium.org9085a012009-05-11 19:22:57 +00001403 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001404 age_mark_ = start_;
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001405
1406 return Commit();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001407}
1408
1409
// Forgets the memory region.  Nothing is freed here; the backing
// reservation is presumably owned by the enclosing NewSpace — the
// semispace only resets its bookkeeping.
void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}
1414
1415
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001416bool SemiSpace::Grow() {
sgjesse@chromium.orgc81c8942009-08-21 10:54:26 +00001417 // Double the semispace size but only up to maximum capacity.
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001418 int maximum_extra = maximum_capacity_ - capacity_;
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001419 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001420 maximum_extra);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001421 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1422 high(), extra, executable())) {
kasper.lund7276f142008-07-30 08:49:36 +00001423 return false;
1424 }
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001425 capacity_ += extra;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001426 return true;
1427}
1428
1429
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001430bool SemiSpace::GrowTo(int new_capacity) {
1431 ASSERT(new_capacity <= maximum_capacity_);
1432 ASSERT(new_capacity > capacity_);
1433 size_t delta = new_capacity - capacity_;
1434 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001435 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1436 high(), delta, executable())) {
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001437 return false;
1438 }
1439 capacity_ = new_capacity;
1440 return true;
1441}
1442
1443
// Uncommits memory so the semispace covers exactly new_capacity bytes.
// new_capacity must be below the current capacity but no smaller than
// initial_capacity_.  Returns false (capacity_ unchanged) if the OS
// uncommit fails.
bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  size_t delta = capacity_ - new_capacity;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  // Release the topmost |delta| committed bytes.
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
      high() - delta, delta)) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}
1456
1457
#ifdef DEBUG
// Debug printing and verification are intentionally no-ops for a semispace:
// it is a single contiguous committed range with no per-object structure to
// dump or check at this level.
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif
1464
1465
1466// -----------------------------------------------------------------------------
1467// SemiSpaceIterator implementation.
// Iterates over all objects currently in to-space, from bottom() to top().
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}
1471
1472
// As above, but |size_func| is used instead of HeapObject::Size() when
// advancing past each object.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}
1477
1478
// Iterates over the objects in to-space beginning at |start| (which must lie
// inside to-space) up to the current top().
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}
1482
1483
// Shared constructor body: records the iteration window [start, end) over
// the new space's to-space and the optional size callback.
void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}
1495
1496
1497#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
static void ClearHistograms() {
  Isolate* isolate = Isolate::Current();
  // We reset the name each time, though it hasn't changed.
  // INSTANCE_TYPE_LIST expands the macro once per instance type.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  // The per-JSObject spill statistics are collected alongside the
  // histograms; reset them as well.
  isolate->js_spill_information()->Clear();
}
1512
1513
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001514static void ClearCodeKindStatistics() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001515 Isolate* isolate = Isolate::Current();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001516 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001517 isolate->code_kind_statistics()[i] = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001518 }
1519}
1520
1521
// Prints a histogram of code-object bytes per Code::Kind for the current
// isolate.  The switch below must name every Code::Kind; a kind missing a
// CASE would leave its table entry NULL (and the compiler's enum-coverage
// warning is the guard against that).
static void ReportCodeKindStatistics() {
  Isolate* isolate = Isolate::Current();
  const char* table[Code::NUMBER_OF_KINDS] = { NULL };

// Maps each enum value to its printable name via the preprocessor.
#define CASE(name) \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(OPTIMIZED_FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
      CASE(KEYED_CALL_IC);
      CASE(UNARY_OP_IC);
      CASE(BINARY_OP_IC);
      CASE(COMPARE_IC);
      CASE(TO_BOOLEAN_IC);
    }
  }

#undef CASE

  PrintF("\n Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    // Only kinds with a non-zero byte count are reported.
    if (isolate->code_kind_statistics()[i] > 0) {
      PrintF(" %-20s: %10d bytes\n", table[i],
             isolate->code_kind_statistics()[i]);
    }
  }
  PrintF("\n");
}
1560
1561
// Records |obj| in the shared heap histograms and, when
// --collect-heap-spill-statistics is set, folds JS objects into the spill
// statistics.  Returns the object's size in bytes so callers can advance
// an iteration cursor.
static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = Isolate::Current();
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  // ClearHistograms() must have run first so the name slots are populated.
  ASSERT(isolate->heap_histograms()[type].name() != NULL);
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(
        isolate->js_spill_information());
  }

  return obj->Size();
}
1577
1578
// Prints the shared heap histograms: one line per instance type with a
// non-zero count, followed by a combined summary line for all string
// types.  When |print_spill| and --collect-heap-spill-statistics are both
// set, the JS object spill statistics are printed as well.
static void ReportHistogram(bool print_spill) {
  Isolate* isolate = Isolate::Current();
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF(" %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.  STRING_TYPE_LIST expands INCREMENT once per
  // string instance type.
#define INCREMENT(type, size, name, camel_name) \
  string_number += isolate->heap_histograms()[type].number(); \
  string_bytes += isolate->heap_histograms()[type].bytes();
  int string_number = 0;
  int string_bytes = 0;
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}
1609#endif // DEBUG
1610
1611
1612// Support for statistics gathering for --heap-stats and --log-gc.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001613void NewSpace::ClearHistograms() {
1614 for (int i = 0; i <= LAST_TYPE; i++) {
1615 allocated_histogram_[i].clear();
1616 promoted_histogram_[i].clear();
1617 }
1618}
1619
1620// Because the copying collector does not touch garbage objects, we iterate
1621// the new space before a collection to get a histogram of allocated objects.
whesse@chromium.org030d38e2011-07-13 13:23:34 +00001622// This only happens when --log-gc flag is set.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001623void NewSpace::CollectStatistics() {
1624 ClearHistograms();
1625 SemiSpaceIterator it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001626 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1627 RecordAllocation(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001628}
1629
1630
// Emits one histogram (|info|, indexed by instance type) to the GC log as a
// heap sample: a begin event, one item per non-empty type (all string types
// lumped into a single "STRING_TYPE" item), then an end event.
static void DoReportStatistics(Isolate* isolate,
                               HistogramInfo* info, const char* description) {
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
  string_number += info[type].number(); \
  string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(isolate,
          HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001657
1658
// Reports new-space statistics: in debug builds with --heap-stats, prints
// capacity/availability and the allocation histogram to stdout; with
// --log-gc, emits both the allocated and promoted histograms to the GC log.
void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF(" capacity: %" V8_PTR_PREFIX "d"
           ", available: %" V8_PTR_PREFIX "d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-34s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

  if (FLAG_log_gc) {
    Isolate* isolate = ISOLATE;
    DoReportStatistics(isolate, allocated_histogram_, "allocated");
    DoReportStatistics(isolate, promoted_histogram_, "promoted");
  }
}
1685
1686
1687void NewSpace::RecordAllocation(HeapObject* obj) {
1688 InstanceType type = obj->map()->instance_type();
1689 ASSERT(0 <= type && type <= LAST_TYPE);
1690 allocated_histogram_[type].increment_number(1);
1691 allocated_histogram_[type].increment_bytes(obj->Size());
1692}
1693
1694
1695void NewSpace::RecordPromotion(HeapObject* obj) {
1696 InstanceType type = obj->map()->instance_type();
1697 ASSERT(0 <= type && type <= LAST_TYPE);
1698 promoted_histogram_[type].increment_number(1);
1699 promoted_histogram_[type].increment_bytes(obj->Size());
1700}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001701
1702
1703// -----------------------------------------------------------------------------
1704// Free lists for old object spaces implementation
1705
// Formats a free block of |size_in_bytes| so heap iteration can step over
// it like a normal object.
void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block. If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (eg, one or two words), to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > ByteArray::kHeaderSize) {
    // raw_unchecked_* accessors avoid map checks that would fail while the
    // heap is being deserialized.
    set_map(heap->raw_unchecked_byte_array_map());
    // Can't use ByteArray::cast because it fails during deserialization.
    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(heap->raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(heap->raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
  // deserialization because the byte array map is not done yet.
}
1732
1733
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001734Address FreeListNode::next(Heap* heap) {
ager@chromium.org3811b432009-10-28 14:53:37 +00001735 ASSERT(IsFreeListNode(this));
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001736 if (map() == heap->raw_unchecked_byte_array_map()) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001737 ASSERT(Size() >= kNextOffset + kPointerSize);
1738 return Memory::Address_at(address() + kNextOffset);
1739 } else {
1740 return Memory::Address_at(address() + kPointerSize);
1741 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001742}
1743
1744
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001745void FreeListNode::set_next(Heap* heap, Address next) {
ager@chromium.org3811b432009-10-28 14:53:37 +00001746 ASSERT(IsFreeListNode(this));
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001747 if (map() == heap->raw_unchecked_byte_array_map()) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001748 ASSERT(Size() >= kNextOffset + kPointerSize);
1749 Memory::Address_at(address() + kNextOffset) = next;
1750 } else {
1751 Memory::Address_at(address() + kPointerSize) = next;
1752 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001753}
1754
1755
// Constructs an empty free list; failed allocations are reported as
// retry-after-GC failures against |owner|'s allocation space.
OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
    : heap_(heap),
      owner_(owner) {
  Reset();
}
1761
1762
1763void OldSpaceFreeList::Reset() {
1764 available_ = 0;
1765 for (int i = 0; i < kFreeListsLength; i++) {
1766 free_[i].head_node_ = NULL;
1767 }
1768 needs_rebuild_ = false;
1769 finger_ = kHead;
1770 free_[kHead].next_size_ = kEnd;
1771}
1772
1773
// Rebuilds the singly linked "size list" that threads together the indices
// of all non-empty exact-size free lists in increasing order, so Allocate
// can skip empty sizes when searching for a best fit.
void OldSpaceFreeList::RebuildSizeList() {
  ASSERT(needs_rebuild_);
  int cur = kHead;
  for (int i = cur + 1; i < kFreeListsLength; i++) {
    if (free_[i].head_node_ != NULL) {
      free_[cur].next_size_ = i;
      cur = i;
    }
  }
  // Terminate the chain after the last non-empty size.
  free_[cur].next_size_ = kEnd;
  needs_rebuild_ = false;
}
1786
1787
// Returns the block [start, start + size_in_bytes) to the free list.
// The return value is the number of bytes that could NOT be reused
// (wasted): the full size when freelists are disabled or the block is too
// small to carry a next pointer, 0 when the block was queued.
int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
  Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
#endif
  // Format the block so heap iteration can walk over it.
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, size_in_bytes);

  // We don't use the freelists in compacting mode. This makes it more like a
  // GC that only has mark-sweep-compact and doesn't have a mark-sweep
  // collector.
  if (FLAG_always_compact) {
    return size_in_bytes;
  }

  // Early return to drop too-small blocks on the floor (one or two word
  // blocks cannot hold a map pointer, a size field, and a pointer to the
  // next block in the free list).
  if (size_in_bytes < kMinBlockSize) {
    return size_in_bytes;
  }

  // Insert other blocks at the head of an exact free list.
  int index = size_in_bytes >> kPointerSizeLog2;
  node->set_next(heap_, free_[index].head_node_);
  free_[index].head_node_ = node->address();
  available_ += size_in_bytes;
  // The size list is now stale; Allocate rebuilds it lazily.
  needs_rebuild_ = true;
  return 0;
}
1817
1818
// Allocates size_in_bytes from the free list.  Prefers an exact-size block;
// otherwise finds the smallest sufficient block via the size list, carves
// the request off its front, and re-queues the remainder.  *wasted_bytes
// receives the size of any remainder too small to re-queue.  Returns a
// RetryAfterGC failure when no block is large enough.
MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  if (needs_rebuild_) RebuildSizeList();
  int index = size_in_bytes >> kPointerSizeLog2;
  // Check for a perfect fit.
  if (free_[index].head_node_ != NULL) {
    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
    // If this was the last block of its size, remove the size.
    if ((free_[index].head_node_ = node->next(heap_)) == NULL)
      RemoveSize(index);
    available_ -= size_in_bytes;
    *wasted_bytes = 0;
    ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
    return node;
  }
  // Search the size list for the best fit.  |prev| trails |cur| so the
  // size list can be patched when entries empty out; the finger caches the
  // last search position to avoid rescanning from the head.
  int prev = finger_ < index ? finger_ : kHead;
  int cur = FindSize(index, &prev);
  ASSERT(index < cur);
  if (cur == kEnd) {
    // No large enough size in list.
    *wasted_bytes = 0;
    return Failure::RetryAfterGC(owner_);
  }
  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
  // The remainder (in words / bytes) left after carving the request off
  // the front of the found block.
  int rem = cur - index;
  int rem_bytes = rem << kPointerSizeLog2;
  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
                                                     size_in_bytes);
  // Distinguish the cases prev < rem < cur and rem <= prev < cur
  // to avoid many redundant tests and calls to Insert/RemoveSize.
  if (prev < rem) {
    // Simple case: insert rem between prev and cur.
    finger_ = prev;
    free_[prev].next_size_ = rem;
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
      free_[rem].next_size_ = free_[cur].next_size_;
    } else {
      free_[rem].next_size_ = cur;
    }
    // Add the remainder block.
    rem_node->set_size(heap_, rem_bytes);
    rem_node->set_next(heap_, free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
  } else {
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
      finger_ = prev;
      free_[prev].next_size_ = free_[cur].next_size_;
    }
    if (rem_bytes < kMinBlockSize) {
      // Too-small remainder is wasted.  It is still formatted (set_size) so
      // heap iteration can step over it.
      rem_node->set_size(heap_, rem_bytes);
      available_ -= size_in_bytes + rem_bytes;
      *wasted_bytes = rem_bytes;
      return cur_node;
    }
    // Add the remainder block and, if needed, insert its size.
    rem_node->set_size(heap_, rem_bytes);
    rem_node->set_next(heap_, free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
    if (rem_node->next(heap_) == NULL) InsertSize(rem);
  }
  available_ -= size_in_bytes;
  *wasted_bytes = 0;
  return cur_node;
}
1892
1893
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00001894void OldSpaceFreeList::MarkNodes() {
1895 for (int i = 0; i < kFreeListsLength; i++) {
1896 Address cur_addr = free_[i].head_node_;
1897 while (cur_addr != NULL) {
1898 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001899 cur_addr = cur_node->next(heap_);
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00001900 cur_node->SetMark();
1901 }
1902 }
1903}
1904
1905
#ifdef DEBUG
// Debug helper: linearly scans every exact-size list for |node|.
bool OldSpaceFreeList::Contains(FreeListNode* node) {
  for (int i = 0; i < kFreeListsLength; i++) {
    for (Address addr = free_[i].head_node_; addr != NULL;) {
      FreeListNode* candidate = FreeListNode::FromAddress(addr);
      if (candidate == node) return true;
      addr = candidate->next(heap_);
    }
  }
  return false;
}
#endif
1919
1920
// Constructs an empty free list whose blocks all have the single fixed
// size |object_size|; failed allocations are reported against |owner|.
FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
                                     AllocationSpace owner,
                                     int object_size)
    : heap_(heap), owner_(owner), object_size_(object_size) {
  Reset();
}
1927
1928
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001929void FixedSizeFreeList::Reset() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001930 available_ = 0;
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001931 head_ = tail_ = NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001932}
1933
1934
// Appends the fixed-size block at |start| to the tail of the free list
// (FIFO order, unlike the LIFO OldSpaceFreeList).
void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
  Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
#endif
  // We only use the freelists with mark-sweep.
  ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
  // Format the block so heap iteration can step over it.
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, object_size_);
  node->set_next(heap_, NULL);
  if (head_ == NULL) {
    tail_ = head_ = node->address();
  } else {
    FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
    tail_ = node->address();
  }
  available_ += object_size_;
}
1952
1953
lrn@chromium.org303ada72010-10-27 09:33:13 +00001954MaybeObject* FixedSizeFreeList::Allocate() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001955 if (head_ == NULL) {
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00001956 return Failure::RetryAfterGC(owner_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001957 }
1958
sgjesse@chromium.org755c5b12009-05-29 11:04:38 +00001959 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001960 FreeListNode* node = FreeListNode::FromAddress(head_);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001961 head_ = node->next(heap_);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001962 available_ -= object_size_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001963 return node;
1964}
1965
1966
// Sets the mark bit on the free-list nodes so the sweeper treats them as
// live.  NOTE(review): the loop condition stops when cur_addr == tail_, so
// the tail node itself is never marked — confirm this is intentional (the
// OldSpaceFreeList::MarkNodes above marks every node).
void FixedSizeFreeList::MarkNodes() {
  Address cur_addr = head_;
  while (cur_addr != NULL && cur_addr != tail_) {
    FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
    // Read the link before marking, then mark the node.
    cur_addr = cur_node->next(heap_);
    cur_node->SetMark();
  }
}
1975
1976
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001977// -----------------------------------------------------------------------------
1978// OldSpace implementation
1979
// Prepares the old space for a mark-compact GC.  For a compacting GC the
// relocation info is reset so the whole space reads as available; for a
// non-compacting GC the linear-allocation accounting is folded into the
// stats.  Either way the free list is dropped and rebuilt after the GC.
void OldSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the super class.
  PagedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Reset relocation info.  During a compacting collection, everything in
    // the space is considered 'available' and we will rediscover live data
    // and waste during the collection.
    MCResetRelocationInfo();
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer is considered allocated (everything above is
    // available) and we will rediscover available and wasted bytes during
    // the collection.
    accounting_stats_.AllocateBytes(free_list_.available());
    accounting_stats_.FillWastedBytes(Waste());
  }

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}
2002
2003
// Commits the relocation info after a compacting collection: adopts the
// forwarding allocation pointer as the real one and rebuilds the free list
// from the unused tails of all pages that received relocated objects.
void OldSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet built free lists or
  // wasted any space.
  ASSERT(Waste() == 0);
  ASSERT(AvailableFree() == 0);

  // Build the free list for the space.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* p = it.next();
    // Space below the relocation pointer is allocated.
    computed_size +=
        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
    if (it.has_next()) {
      // Free the space at the top of the page.  The last page is skipped:
      // its top is where linear allocation continues.
      int extra_size =
          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
      if (extra_size > 0) {
        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
                                           extra_size);
        // The bytes we have just "freed" to add to the free list were
        // already accounted as available.
        accounting_stats_.WasteBytes(wasted_bytes);
      }
    }
  }

  // Make sure the computed size - based on the used portion of the pages in
  // use - matches the size obtained while computing forwarding addresses.
  ASSERT(computed_size == Size());
}
2041
2042
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002043bool NewSpace::ReserveSpace(int bytes) {
2044 // We can't reliably unpack a partial snapshot that needs more new space
2045 // space than the minimum NewSpace size.
2046 ASSERT(bytes <= InitialCapacity());
2047 Address limit = allocation_info_.limit;
2048 Address top = allocation_info_.top;
2049 return limit - top >= bytes;
2050}
2051
2052
// Moves the page range (prev, last] to the end of the space's page list
// and resets each moved page's watermarks/region marks so the pages read
// as empty.  |prev| == NULL means the range starts at the first page.
// The pages are unlinked, not released, so page order may diverge from
// chunk order (tracked via page_list_is_chunk_ordered_).
void PagedSpace::FreePages(Page* prev, Page* last) {
  if (last == AllocationTopPage()) {
    // Pages are already at the end of used pages.
    return;
  }

  Page* first = NULL;

  // Remove pages from the list.
  if (prev == NULL) {
    first = first_page_;
    first_page_ = last->next_page();
  } else {
    first = prev->next_page();
    heap()->isolate()->memory_allocator()->SetNextPage(
        prev, last->next_page());
  }

  // Attach it after the last page.
  heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
  last_page_ = last;
  heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);

  // Clean them up: reset every moved page to an empty state.
  do {
    first->InvalidateWatermark(true);
    first->SetAllocationWatermark(first->ObjectAreaStart());
    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
    first = first->next_page();
  } while (first != NULL);

  // Order of pages in this space might no longer be consistent with
  // order of pages in chunks.
  page_list_is_chunk_ordered_ = false;
}
2089
2090
// Restores the page list to the order pages have inside their chunks.
// Each page is first tagged with whether it was in use (at or before the
// allocation top) before the reordering.  Unused pages that end up in the
// middle of the list afterwards are either returned to the free list (when
// |deallocate_blocks| is true) or overwritten with filler objects so that
// heap iteration remains valid.
void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
  const bool add_to_freelist = true;

  // Mark used and unused pages to properly fill unused pages
  // after reordering.
  PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
  Page* last_in_use = AllocationTopPage();
  bool in_use = true;

  while (all_pages_iterator.has_next()) {
    Page* p = all_pages_iterator.next();
    p->SetWasInUseBeforeMC(in_use);
    if (p == last_in_use) {
      // We passed a page containing allocation top. All consequent
      // pages are not used.
      in_use = false;
    }
  }

  if (page_list_is_chunk_ordered_) return;

  Page* new_last_in_use = Page::FromAddress(NULL);
  heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
      this, &first_page_, &last_page_, &new_last_in_use);
  ASSERT(new_last_in_use->is_valid());

  if (new_last_in_use != last_in_use) {
    // Current allocation top points to a page which is now in the middle
    // of page list. We should move allocation top forward to the new last
    // used page so various object iterators will continue to work properly.
    int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
                                         last_in_use->AllocationTop());

    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
    if (size_in_bytes > 0) {
      // Fill (or free) the tail of the old allocation-top page so it can
      // be treated as fully used.
      Address start = last_in_use->AllocationTop();
      if (deallocate_blocks) {
        accounting_stats_.AllocateBytes(size_in_bytes);
        DeallocateBlock(start, size_in_bytes, add_to_freelist);
      } else {
        heap()->CreateFillerObjectAt(start, size_in_bytes);
      }
    }

    // New last in use page was in the middle of the list before
    // sorting so it is full.
    SetTop(new_last_in_use->AllocationTop());

    ASSERT(AllocationTopPage() == new_last_in_use);
    ASSERT(AllocationTopPage()->WasInUseBeforeMC());
  }

  PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
  while (pages_in_use_iterator.has_next()) {
    Page* p = pages_in_use_iterator.next();
    if (!p->WasInUseBeforeMC()) {
      // Empty page is in the middle of a sequence of used pages.
      // Allocate it as a whole and deallocate immediately.
      int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
                                           p->ObjectAreaStart());

      p->SetAllocationWatermark(p->ObjectAreaStart());
      Address start = p->ObjectAreaStart();
      if (deallocate_blocks) {
        accounting_stats_.AllocateBytes(size_in_bytes);
        DeallocateBlock(start, size_in_bytes, add_to_freelist);
      } else {
        heap()->CreateFillerObjectAt(start, size_in_bytes);
      }
    }
  }

  page_list_is_chunk_ordered_ = true;
}
2165
2166
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002167void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2168 if (will_compact) {
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002169 RelinkPageListInChunkOrder(false);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002170 }
2171}
2172
2173
// Ensures that at least |bytes| of linearly-allocatable space will be
// available, expanding the space page by page when needed.  On the slow
// path the remainder of the current page is put on the free list and the
// allocation info is advanced to a fresh page.  Returns false when the old
// generation allocation limit is reached or expansion fails.
bool PagedSpace::ReserveSpace(int bytes) {
  Address limit = allocation_info_.limit;
  Address top = allocation_info_.top;
  // Fast path: the current linear allocation area is already big enough.
  if (limit - top >= bytes) return true;

  // There wasn't enough space in the current page.  Let's put the rest
  // of the page on the free list and start a fresh page.
  PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));

  Page* reserved_page = TopPageOf(allocation_info_);
  int bytes_left_to_reserve = bytes;
  // Walk (and if necessary grow) the page chain until enough whole pages
  // are available after the current one.
  while (bytes_left_to_reserve > 0) {
    if (!reserved_page->next_page()->is_valid()) {
      if (heap()->OldGenerationAllocationLimitReached()) return false;
      Expand(reserved_page);
    }
    bytes_left_to_reserve -= Page::kPageSize;
    reserved_page = reserved_page->next_page();
    if (!reserved_page->is_valid()) return false;
  }
  ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&allocation_info_,
                    TopPageOf(allocation_info_)->next_page());
  return true;
}
2200
2201
2202// You have to call this last, since the implementation from PagedSpace
2203// doesn't know that memory was 'promised' to large object space.
2204bool LargeObjectSpace::ReserveSpace(int bytes) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002205 return heap()->OldGenerationSpaceAvailable() >= bytes;
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002206}
2207
2208
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed (size_in_bytes should not be greater than a page's
  // object area size).
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation unless that
  // is currently forbidden.
  if (!heap()->linear_allocation()) {
    int wasted_bytes;
    Object* result;
    MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
    // Bytes the free list could not hand out in a usable form are counted
    // as waste whether or not the allocation succeeded.
    accounting_stats_.WasteBytes(wasted_bytes);
    if (maybe->ToObject(&result)) {
      accounting_stats_.AllocateBytes(size_in_bytes);

      HeapObject* obj = HeapObject::cast(result);
      Page* p = Page::FromAddress(obj->address());

      if (obj->address() >= p->AllocationWatermark()) {
        // There should be no hole between the allocation watermark
        // and allocated object address.
        // Memory above the allocation watermark was not swept and
        // might contain garbage pointers to new space.
        ASSERT(obj->address() == p->AllocationWatermark());
        p->SetAllocationWatermark(obj->address() + size_in_bytes);
      }

      return obj;
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}
2265
2266
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002267void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002268 current_page->SetAllocationWatermark(allocation_info_.top);
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002269 int free_size =
2270 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
kasper.lund7276f142008-07-30 08:49:36 +00002271 if (free_size > 0) {
2272 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2273 accounting_stats_.WasteBytes(wasted_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002274 }
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002275}
2276
2277
2278void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002279 current_page->SetAllocationWatermark(allocation_info_.top);
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002280 int free_size =
2281 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2282 // In the fixed space free list all the free list items have the right size.
2283 // We use up the rest of the page while preserving this invariant.
2284 while (free_size >= object_size_in_bytes_) {
2285 free_list_.Free(allocation_info_.top);
2286 allocation_info_.top += object_size_in_bytes_;
2287 free_size -= object_size_in_bytes_;
2288 accounting_stats_.WasteBytes(object_size_in_bytes_);
2289 }
2290}
2291
2292
2293// Add the block at the top of the page to the space's free list, set the
2294// allocation info to the next page (assumed to be one), and allocate
2295// linearly there.
2296HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2297 int size_in_bytes) {
2298 ASSERT(current_page->next_page()->is_valid());
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002299 Page* next_page = current_page->next_page();
2300 next_page->ClearGCFields();
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002301 PutRestOfCurrentPageOnFreeList(current_page);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002302 SetAllocationInfo(&allocation_info_, next_page);
kasper.lund7276f142008-07-30 08:49:36 +00002303 return AllocateLinearly(&allocation_info_, size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002304}
2305
2306
// Returns a block of memory to the space.  In old space a block of any
// size can go back to the free list in one piece, so this simply forwards
// to Free().
void OldSpace::DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) {
  Free(start, size_in_bytes, add_to_freelist);
}
2312
2313
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002314#ifdef DEBUG
// Prints the code statistics gathered in the isolate's comment statistics
// table: per-code-kind totals followed by, for each code comment, the
// accumulated size, occurrence count, and average size.  Debug-mode only.
void PagedSpace::ReportCodeStatistics() {
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics();
  PrintF("Code comment statistics (\" [ comment-txt : size/ "
         "count (average)\"):\n");
  // Include the overflow entry at index kMaxComments ("<=", not "<").
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}
2331
2332
2333void PagedSpace::ResetCodeStatistics() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002334 Isolate* isolate = Isolate::Current();
2335 CommentStatistic* comments_statistics =
2336 isolate->paged_space_comments_statistics();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002337 ClearCodeKindStatistics();
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002338 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2339 comments_statistics[i].Clear();
2340 }
2341 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2342 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2343 comments_statistics[CommentStatistic::kMaxComments].count = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002344}
2345
2346
// Adds a comment to the 'comments_statistics' table. Performance OK as long
// as 'kMaxComments' is small
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments
  if (delta <= 0) return;
  // Default to the overflow bucket at index kMaxComments; used when the
  // table is full and no existing entry matches.
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to result.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      // Free slot: claim it for this comment.
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update entry for 'comment'
  cs->size += delta;
  cs->count += 1;
}
2371
2372
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'. RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip
    return;
  }

  // Search for end of nested comment or a new nested comment
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  // Flat size: code covered by this comment but not by nested comments.
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // from loop.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      // Attribute code emitted since the last marker to this comment.
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment
      // A new comment
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered with previous comment
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}
2408
2409
// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      // Attribute the whole code object's size to its kind.
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      // 'delta' accumulates instruction bytes not covered by any comment.
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      // Account the remaining uncommented tail as "NoComment".
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}
2439
2440
2441void OldSpace::ReportStatistics() {
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002442 int pct = static_cast<int>(Available() * 100 / Capacity());
2443 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2444 ", waste: %" V8_PTR_PREFIX "d"
2445 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002446 Capacity(), Waste(), Available(), pct);
2447
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002448 ClearHistograms();
2449 HeapObjectIterator obj_it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002450 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2451 CollectHistogramInfo(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002452 ReportHistogram(true);
2453}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002454#endif
2455
2456// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002457// FixedSpace implementation
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002458
// Adjusts the accounting for the upcoming GC and resets the free list,
// which is rebuilt during the collection.
void FixedSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the super class.
  PagedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Reset relocation info.
    MCResetRelocationInfo();

    // During a compacting collection, everything in the space is considered
    // 'available' (set by the call to MCResetRelocationInfo) and we will
    // rediscover live and wasted bytes during the collection.
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer except wasted top-of-page blocks is considered
    // allocated and we will rediscover available bytes during the
    // collection.
    accounting_stats_.AllocateBytes(free_list_.available());
  }

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}
2482
2483
// Commits the allocation pointers computed during mark-compact relocation
// and recomputes per-page allocation watermarks and waste for the pages in
// use after the collection.
void FixedSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet wasted any space.
  ASSERT(Waste() == 0);

  // Update allocation_top of each page in use and compute waste.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* page = it.next();
    Address page_top = page->AllocationTop();
    computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
    if (it.has_next()) {
      // Every page except the last one wastes the area past its top.
      accounting_stats_.WasteBytes(
          static_cast<int>(page->ObjectAreaEnd() - page_top));
      page->SetAllocationWatermark(page_top);
    }
  }

  // Make sure the computed size - based on the used portion of the
  // pages in use - matches the size we adjust during allocation.
  ASSERT(computed_size == Size());
}
2511
2512
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
  // All objects in a fixed space have the same size.
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed.
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation unless
  // that is currently forbidden. The fixed space free list implicitly assumes
  // that all free blocks are of the fixed size.
  if (!heap()->linear_allocation()) {
    Object* result;
    MaybeObject* maybe = free_list_.Allocate();
    if (maybe->ToObject(&result)) {
      accounting_stats_.AllocateBytes(size_in_bytes);
      HeapObject* obj = HeapObject::cast(result);
      Page* p = Page::FromAddress(obj->address());

      if (obj->address() >= p->AllocationWatermark()) {
        // There should be no hole between the allocation watermark
        // and allocated object address.
        // Memory above the allocation watermark was not swept and
        // might contain garbage pointers to new space.
        ASSERT(obj->address() == p->AllocationWatermark());
        p->SetAllocationWatermark(obj->address() + size_in_bytes);
      }

      return obj;
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}
2567
2568
// Move to the next page (there is assumed to be one) and allocate there.
// The top of page block is always wasted, because it is too small to hold a
// map.
HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
                                           int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  // Linear allocation must have consumed the current page completely.
  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  Page* next_page = current_page->next_page();
  next_page->ClearGCFields();
  current_page->SetAllocationWatermark(allocation_info_.top);
  // The page_extra_ bytes at the top of each page can never be allocated
  // and are accounted as waste.
  accounting_stats_.WasteBytes(page_extra_);
  SetAllocationInfo(&allocation_info_, next_page);
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}
2584
2585
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002586void FixedSpace::DeallocateBlock(Address start,
2587 int size_in_bytes,
2588 bool add_to_freelist) {
2589 // Free-list elements in fixed space are assumed to have a fixed size.
2590 // We break the free block into chunks and add them to the free list
2591 // individually.
2592 int size = object_size_in_bytes();
2593 ASSERT(size_in_bytes % size == 0);
2594 Address end = start + size_in_bytes;
2595 for (Address a = start; a < end; a += size) {
2596 Free(a, add_to_freelist);
2597 }
2598}
2599
2600
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002601#ifdef DEBUG
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002602void FixedSpace::ReportStatistics() {
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002603 int pct = static_cast<int>(Available() * 100 / Capacity());
2604 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2605 ", waste: %" V8_PTR_PREFIX "d"
2606 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002607 Capacity(), Waste(), Available(), pct);
2608
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002609 ClearHistograms();
2610 HeapObjectIterator obj_it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002611 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2612 CollectHistogramInfo(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002613 ReportHistogram(false);
2614}
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002615#endif
2616
2617
2618// -----------------------------------------------------------------------------
2619// MapSpace implementation
2620
// In addition to the base-class preparation, records the address of every
// page when compacting so that pages can be looked up by their
// mc_page_index during the compaction.
void MapSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the super class.
  FixedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Initialize map index entry.
    int page_count = 0;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (it.has_next()) {
      ASSERT_MAP_PAGE_INDEX(page_count);

      Page* p = it.next();
      // Pages are expected to be pre-numbered in list order.
      ASSERT(p->mc_page_index == page_count);

      page_addresses_[page_count++] = p->address();
    }
  }
}
2639
2640
2641#ifdef DEBUG
// Debug-mode sanity check for objects found in map space.
void MapSpace::VerifyObject(HeapObject* object) {
  // The object should be a map or a free-list node.
  ASSERT(object->IsMap() || object->IsByteArray());
}
2646#endif
2647
2648
2649// -----------------------------------------------------------------------------
2650// GlobalPropertyCellSpace implementation
2651
2652#ifdef DEBUG
// Debug-mode sanity check for objects found in cell space.
void CellSpace::VerifyObject(HeapObject* object) {
  // The object should be a global object property cell or a free-list node.
  ASSERT(object->IsJSGlobalPropertyCell() ||
         object->map() == heap()->two_pointer_filler_map());
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002658#endif
2659
2660
2661// -----------------------------------------------------------------------------
2662// LargeObjectIterator
2663
2664LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2665 current_ = space->first_chunk_;
2666 size_func_ = NULL;
2667}
2668
2669
2670LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2671 HeapObjectCallback size_func) {
2672 current_ = space->first_chunk_;
2673 size_func_ = size_func;
2674}
2675
2676
2677HeapObject* LargeObjectIterator::next() {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002678 if (current_ == NULL) return NULL;
2679
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002680 HeapObject* object = current_->GetObject();
2681 current_ = current_->next();
2682 return object;
2683}
2684
2685
2686// -----------------------------------------------------------------------------
2687// LargeObjectChunk
2688
// Allocates a chunk of raw memory large enough to hold an object of
// |size_in_bytes|.  For executable chunks an extra guard page is allocated
// in front of the chunk and protected.  Returns NULL on failure.
LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                        Executability executable) {
  size_t requested = ChunkSizeFor(size_in_bytes);
  size_t size;
  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
  Isolate* isolate = Isolate::Current();
  void* mem = isolate->memory_allocator()->AllocateRawMemory(
      requested + guard_size, &size, executable);
  if (mem == NULL) return NULL;

  // The start of the chunk may be overlayed with a page so we have to
  // make sure that the page flags fit in the size field.
  ASSERT((size & Page::kPageFlagMask) == 0);

  LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
  if (size < requested + guard_size) {
    // The allocator returned less than we asked for; give it back.
    isolate->memory_allocator()->FreeRawMemory(
        mem, size, executable);
    LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
    return NULL;
  }

  if (guard_size != 0) {
    // Protect the leading guard page and exclude it from the usable chunk.
    OS::Guard(mem, guard_size);
    size -= guard_size;
    mem = static_cast<Address>(mem) + guard_size;
  }

  ObjectSpace space = (executable == EXECUTABLE)
      ? kObjectSpaceCodeSpace
      : kObjectSpaceLoSpace;
  isolate->memory_allocator()->PerformAllocationCallback(
      space, kAllocationActionAllocate, size);

  LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
  chunk->size_ = size;
  // Let the page embedded in the chunk know which heap it belongs to.
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  page->heap_ = isolate->heap();
  return chunk;
}
2729
2730
2731int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002732 int os_alignment = static_cast<int>(OS::AllocateAlignment());
kasperl@chromium.orga5551262010-12-07 12:49:48 +00002733 if (os_alignment < Page::kPageSize) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002734 size_in_bytes += (Page::kPageSize - os_alignment);
kasperl@chromium.orga5551262010-12-07 12:49:48 +00002735 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002736 return size_in_bytes + Page::kObjectStartOffset;
2737}
2738
2739// -----------------------------------------------------------------------------
2740// LargeObjectSpace
2741
// Constructs an empty large object space.  The space itself is registered
// NOT_EXECUTABLE because executability is decided per allocation (see
// AllocateRawInternal), not for the space as a whole.
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_chunk_(NULL),   // Head of the singly linked list of chunks.
      size_(0),             // Total chunk bytes, including page overhead.
      page_count_(0),       // One large object page per chunk.
      objects_size_(0) {}   // Bytes requested for the objects themselves.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002748
2749
2750bool LargeObjectSpace::Setup() {
2751 first_chunk_ = NULL;
2752 size_ = 0;
2753 page_count_ = 0;
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002754 objects_size_ = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002755 return true;
2756}
2757
2758
// Releases every chunk in the space and resets all accounting to zero.
void LargeObjectSpace::TearDown() {
  while (first_chunk_ != NULL) {
    // Unlink the head chunk before freeing it.
    LargeObjectChunk* chunk = first_chunk_;
    first_chunk_ = first_chunk_->next();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
    // The page header (and its executability flag) lives at the first
    // page boundary inside the chunk.
    Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
    Executability executable =
        page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
    ObjectSpace space = kObjectSpaceLoSpace;
    if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
    size_t size = chunk->size();
    // Executable chunks carry a guard page in front (see
    // LargeObjectChunk::New): the raw allocation starts guard_size bytes
    // before chunk->address() and is guard_size bytes larger.
    size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
    heap()->isolate()->memory_allocator()->FreeRawMemory(
        chunk->address() - guard_size,
        size + guard_size,
        executable);
    // Notify embedder allocation callbacks of the per-chunk free.
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, size);
  }

  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
}
2783
2784
// Allocates a fresh chunk holding exactly one large object.
// requested_size is the full allocation request, object_size the object
// itself (0 < object_size <= requested_size).  Returns a RetryAfterGC
// failure when the old-generation limit is reached or the OS refuses the
// memory; otherwise returns the (uninitialized) HeapObject.
MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                                   int object_size,
                                                   Executability executable) {
  ASSERT(0 < object_size && object_size <= requested_size);

  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(identity());
  }

  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
  if (chunk == NULL) {
    return Failure::RetryAfterGC(identity());
  }

  // Account for the new chunk and link it at the head of the chunk list.
  size_ += static_cast<int>(chunk->size());
  objects_size_ += requested_size;
  page_count_++;
  chunk->set_next(first_chunk_);
  first_chunk_ = chunk;

  // Initialize page header.
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  Address object_address = page->ObjectAreaStart();

  // Clear the low order bit of the second word in the page to flag it as a
  // large object page. If the chunk_size happened to be written there, its
  // low order bit should already be clear.
  page->SetIsLargeObjectPage(true);
  page->SetIsPageExecutable(executable);
  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
  return HeapObject::FromAddress(object_address);
}
2820
2821
lrn@chromium.org303ada72010-10-27 09:33:13 +00002822MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002823 ASSERT(0 < size_in_bytes);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002824 return AllocateRawInternal(size_in_bytes,
2825 size_in_bytes,
2826 EXECUTABLE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002827}
2828
2829
lrn@chromium.org303ada72010-10-27 09:33:13 +00002830MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002831 ASSERT(0 < size_in_bytes);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002832 return AllocateRawInternal(size_in_bytes,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002833 size_in_bytes,
2834 NOT_EXECUTABLE);
2835}
2836
2837
lrn@chromium.org303ada72010-10-27 09:33:13 +00002838MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002839 ASSERT(0 < size_in_bytes);
2840 return AllocateRawInternal(size_in_bytes,
2841 size_in_bytes,
2842 NOT_EXECUTABLE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002843}
2844
2845
2846// GC support
lrn@chromium.org303ada72010-10-27 09:33:13 +00002847MaybeObject* LargeObjectSpace::FindObject(Address a) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002848 for (LargeObjectChunk* chunk = first_chunk_;
2849 chunk != NULL;
2850 chunk = chunk->next()) {
2851 Address chunk_address = chunk->address();
2852 if (chunk_address <= a && a < chunk_address + chunk->size()) {
2853 return chunk->GetObject();
2854 }
2855 }
2856 return Failure::Exception();
2857}
2858
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002859
2860LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
2861 // TODO(853): Change this implementation to only find executable
2862 // chunks and use some kind of hash-based approach to speed it up.
2863 for (LargeObjectChunk* chunk = first_chunk_;
2864 chunk != NULL;
2865 chunk = chunk->next()) {
2866 Address chunk_address = chunk->address();
2867 if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
2868 return chunk;
2869 }
2870 }
2871 return NULL;
2872}
2873
2874
// Visits pointers-to-new-space slots in dirty regions of large objects,
// invoking copy_object on each, and rewrites the page's region marks to
// reflect which regions remain dirty afterwards.
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
  LargeObjectIterator it(this);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    if (object->IsFixedArray()) {
      Page* page = Page::FromAddress(object->address());
      uint32_t marks = page->GetRegionMarks();
      // Marks that survive the scan are accumulated here.
      uint32_t newmarks = Page::kAllRegionsCleanMarks;

      if (marks != Page::kAllRegionsCleanMarks) {
        // For a large page a single dirty mark corresponds to several
        // regions (modulo 32). So we treat a large page as a sequence of
        // normal pages of size Page::kPageSize having same dirty marks
        // and subsequently iterate dirty regions on each of these pages.
        Address start = object->address();
        Address end = page->ObjectAreaEnd();
        Address object_end = start + object->Size();

        // Iterate regions of the first normal page covering object.
        // The mark word is shifted so bit 0 lines up with the region
        // containing start, then shifted back when merging into newmarks.
        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
        newmarks |=
            heap()->IterateDirtyRegions(marks >> first_region_number,
                                        start,
                                        end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object) << first_region_number;

        start = end;
        end = start + Page::kPageSize;
        while (end <= object_end) {
          // Iterate next 32 regions.
          newmarks |=
              heap()->IterateDirtyRegions(marks,
                                          start,
                                          end,
                                          &Heap::IteratePointersInDirtyRegion,
                                          copy_object);
          start = end;
          end = start + Page::kPageSize;
        }

        if (start != object_end) {
          // Iterate the last piece of an object which is less than
          // Page::kPageSize.
          newmarks |=
              heap()->IterateDirtyRegions(marks,
                                          start,
                                          object_end,
                                          &Heap::IteratePointersInDirtyRegion,
                                          copy_object);
        }

        page->SetRegionMarks(newmarks);
      }
    }
  }
}
2934
2935
2936void LargeObjectSpace::FreeUnmarkedObjects() {
2937 LargeObjectChunk* previous = NULL;
2938 LargeObjectChunk* current = first_chunk_;
2939 while (current != NULL) {
2940 HeapObject* object = current->GetObject();
kasper.lund7276f142008-07-30 08:49:36 +00002941 if (object->IsMarked()) {
2942 object->ClearMark();
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002943 heap()->mark_compact_collector()->tracer()->decrement_marked_count();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002944 previous = current;
2945 current = current->next();
2946 } else {
erik.corry@gmail.com145eff52010-08-23 11:36:18 +00002947 Page* page = Page::FromAddress(RoundUp(current->address(),
2948 Page::kPageSize));
2949 Executability executable =
2950 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002951 Address chunk_address = current->address();
2952 size_t chunk_size = current->size();
2953
2954 // Cut the chunk out from the chunk list.
2955 current = current->next();
2956 if (previous == NULL) {
2957 first_chunk_ = current;
2958 } else {
2959 previous->set_next(current);
2960 }
2961
2962 // Free the chunk.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002963 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2964 object, heap()->isolate());
ager@chromium.org0ee099b2011-01-25 14:06:47 +00002965 LiveObjectList::ProcessNonLive(object);
2966
ager@chromium.orgc4c92722009-11-18 14:12:51 +00002967 size_ -= static_cast<int>(chunk_size);
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002968 objects_size_ -= object->Size();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002969 page_count_--;
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00002970 ObjectSpace space = kObjectSpaceLoSpace;
rossberg@chromium.org717967f2011-07-20 13:44:42 +00002971 size_t guard_size = 0;
2972 if (executable == EXECUTABLE) {
2973 space = kObjectSpaceCodeSpace;
2974 guard_size = Page::kPageSize;
2975 }
2976 heap()->isolate()->memory_allocator()->FreeRawMemory(
2977 chunk_address - guard_size,
2978 chunk_size + guard_size,
2979 executable);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002980 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2981 space, kAllocationActionFree, size_);
2982 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002983 }
2984 }
2985}
2986
2987
2988bool LargeObjectSpace::Contains(HeapObject* object) {
2989 Address address = object->address();
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002990 if (heap()->new_space()->Contains(address)) {
sgjesse@chromium.orgdf7a2842010-03-25 14:34:15 +00002991 return false;
2992 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002993 Page* page = Page::FromAddress(address);
2994
2995 SLOW_ASSERT(!page->IsLargeObjectPage()
2996 || !FindObject(address)->IsFailure());
2997
2998 return page->IsLargeObjectPage();
2999}
3000
3001
3002#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
// Walks the chunk list directly and checks, per chunk: object placement,
// map validity, allowed object kinds, internal consistency, and (for
// fixed arrays) that every new-space pointer sits in a dirty region.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather then using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRegionDirty.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(heap()->Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (element_object->IsHeapObject() &&
              heap()->InNewSpace(element_object)) {
            // Any slot pointing into new space must have its region's
            // dirty mark set, or the write barrier was missed.
            Address array_addr = object->address();
            Address element_addr = array_addr + FixedArray::kHeaderSize +
                j * kPointerSize;

            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
          }
        }
      }
    }
  }
}
3060
3061
3062void LargeObjectSpace::Print() {
3063 LargeObjectIterator it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003064 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
3065 obj->Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003066 }
3067}
3068
3069
3070void LargeObjectSpace::ReportStatistics() {
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00003071 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003072 int num_objects = 0;
3073 ClearHistograms();
3074 LargeObjectIterator it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003075 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003076 num_objects++;
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003077 CollectHistogramInfo(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003078 }
3079
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00003080 PrintF(" number of objects %d, "
3081 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003082 if (num_objects > 0) ReportHistogram(false);
3083}
3084
3085
3086void LargeObjectSpace::CollectCodeStatistics() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00003087 Isolate* isolate = heap()->isolate();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003088 LargeObjectIterator obj_it(this);
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003089 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003090 if (obj->IsCode()) {
3091 Code* code = Code::cast(obj);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00003092 isolate->code_kind_statistics()[code->kind()] += code->Size();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003093 }
3094 }
3095}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003096#endif // DEBUG
3097
3098} } // namespace v8::internal