// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "liveobjectlist-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


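// Advances the iterator to the next page of the space and returns the first
// object on that page, or NULL when the end of the iteration range has been
// reached.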
HeapObject* HeapObjectIterator::FromNextPage() {
  if (cur_addr_ == end_addr_) return NULL;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  if (cur_addr_ == end_addr_) return NULL;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return FromCurrentPage();
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}


// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange()
    : code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0),
      isolate_(NULL) {
}


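// Reserves a contiguous range of virtual memory of the requested size for
// code objects. The whole reserved range is initially added to the
// allocation list as a single free block.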
bool CodeRange::Setup(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


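// Finds an allocation block of at least 'requested' bytes. If none of the
// remaining blocks on the allocation list is large enough, the free list and
// the allocation list are merged, sorted by address and coalesced before
// searching again; if that still fails, the process is aborted as out of
// memory.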
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}


void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;


MemoryAllocator::MemoryAllocator()
    : capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      initial_chunk_(NULL),
      chunks_(kEstimatedNumberOfChunks),
      free_chunk_ids_(kEstimatedNumberOfChunks),
      max_nof_chunks_(0),
      top_(0),
      isolate_(NULL) {
}


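// The ids of unused chunk slots are kept on a simple stack in
// free_chunk_ids_; Push and Pop manage that stack.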
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ =
      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  size_executable_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  capacity_executable_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


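// Allocates raw memory of the requested size from the OS, or from the code
// range when executable memory is requested and a code range exists.
// Returns NULL when the allocation would exceed the configured capacity or
// the executable capacity.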
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
    return NULL;
  }

  void* mem;
  if (executable == EXECUTABLE) {
    // Check executable memory limit.
    if (size_executable_ + requested >
        static_cast<size_t>(capacity_executable_)) {
      LOG(isolate_,
          StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }
    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range()->exists()) {
      mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
    } else {
      mem = OS::Allocate(requested, allocated, true);
    }
    // Update executable memory size.
    size_executable_ += static_cast<int>(*allocated);
  } else {
    mem = OS::Allocate(requested, allocated, false);
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;

#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  isolate_->counters()->memory_allocated()->Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem,
                                    size_t length,
                                    Executability executable) {
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
  if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
    isolate_->code_range()->FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);

  ASSERT(size_ >= 0);
  ASSERT(size_executable_ >= 0);
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(isolate_,
      NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}


Page* MemoryAllocator::AllocatePages(int requested_pages,
                                     int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  // We may 'lose' a page due to alignment.
  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size, owner->executable());
    LOG(isolate_, DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);

  return new_pages;
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


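// Overwrites a block with kZapValue so that stale pointers into freed or
// uncommitted memory are easier to spot; callers only invoke this in debug
// builds.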
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


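// Initializes the page headers of a freshly allocated chunk: each page gets
// its owning heap, is linked to the next page via its opaque_header, and has
// its watermarks reset; the last page gets a NULL next-page link.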
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
        ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->heap_ = owner->heap();
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->InvalidateWatermark(true);
    p->SetIsLargeObjectPage(false);
    p->SetAllocationWatermark(p->ObjectAreaStart());
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


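// Frees, in whole chunks, the pages from 'p' to the end of the space's page
// list. The chunk containing 'p' is only freed if 'p' is its first page;
// otherwise that chunk is kept and 'p' is returned as the remaining tail.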
Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'.
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'p'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // Set the next_page of last_page to NULL.
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::FreeAllPages(PagedSpace* space) {
  for (int i = 0, length = chunks_.length(); i < length; i++) {
    if (chunks_[i].owner() == space) {
      DeleteChunk(i);
    }
  }
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters* counters = isolate_->counters();
    counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
  } else {
    LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
    size_t size = c.size();
    FreeRawMemory(c.address(), size, c.executable());
    PerformAllocationCallback(space, kAllocationActionFree, size);
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


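// Rebuilds the page list of 'space' so that pages appear in chunk order,
// returning the first and last pages of the relinked list and the last page
// that was in use before the mark-compact collection.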
void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
                                                 Page** first_page,
                                                 Page** last_page,
                                                 Page** last_page_in_use) {
  Page* first = NULL;
  Page* last = NULL;

  for (int i = 0, length = chunks_.length(); i < length; i++) {
    ChunkInfo& chunk = chunks_[i];

    if (chunk.owner() == space) {
      if (first == NULL) {
        Address low = RoundUp(chunk.address(), Page::kPageSize);
        first = Page::FromAddress(low);
      }
      last = RelinkPagesInChunk(i,
                                chunk.address(),
                                chunk.size(),
                                last,
                                last_page_in_use);
    }
  }

  if (first_page != NULL) {
    *first_page = first;
  }

  if (last_page != NULL) {
    *last_page = last;
  }
}


Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    p->InvalidateWatermark(true);
    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
        RoundUp(start, Page::kPageSize),
        Page::kPageSize * pages_in_chunk,
        this, &num_pages);
  } else {
    int requested_pages =
        Min(MemoryAllocator::kPagesPerChunk,
            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
    first_page_ =
        Isolate::Current()->memory_allocator()->AllocatePages(
            requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially clear region marks in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  Isolate::Current()->memory_allocator()->FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
    page = Isolate::Current()->memory_allocator()->
        FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
    page = Isolate::Current()->memory_allocator()->
        FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::MarkAllPagesClean() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
  }
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page to be in that page; MCSpaceOffsetForAddress considers it to be
  // in the previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to the previous page.
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
  current_page->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


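// Grows the space by allocating a new chunk of pages and appending it after
// 'last_page'. Fails if the space is already at capacity or if fewer than a
// full chunk's worth of pages could be added without exceeding max_capacity_.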
bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages =
      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
  // We don't want to have to handle small chunks near the end so if there are
  // not kPagesPerChunk pages available without exceeding the max capacity then
  // act as if memory has run out.
  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
      desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);

  // Sequentially clear region marks of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  if (!page_list_is_chunk_ordered_) {
    // We can't shrink the space if the page list is not chunk-ordered
    // (see comment for class MemoryAllocator for definition).
    return;
  }

  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = heap()->isolate()->memory_allocator()->
      FreePages(top_page->next_page());
  heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = heap()->isolate()->memory_allocator()->
        FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
            last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(heap()->map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap, and
        // page regions covering intergenerational references should be
        // marked dirty.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Set up the new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
  int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
  heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
                                                   to_space_.executable());
  heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
                                                   from_space_.executable());
}

#endif


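// Swaps the roles of the from and to semispaces.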
void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity))  {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(heap()->map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


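// Commits the backing memory for the semispace; the address range itself was
// reserved earlier as part of the new space setup.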
bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      start_, capacity_, executable())) {
    return false;
  }
  committed_ = true;
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
      start_, capacity_)) {
    return false;
  }
  committed_ = false;
  return true;
}


// -----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  initial_capacity_ = initial_capacity;
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;
  committed_ = false;

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;

  return Commit();
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  int maximum_extra = maximum_capacity_ - capacity_;
  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
                  maximum_extra);
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      high(), extra, executable())) {
    return false;
  }
  capacity_ += extra;
  return true;
}


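// Commits additional memory so the semispace reaches exactly 'new_capacity'
// bytes; the delta must be a multiple of the OS allocation alignment and
// stay within the maximum capacity.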
1459bool SemiSpace::GrowTo(int new_capacity) {
1460 ASSERT(new_capacity <= maximum_capacity_);
1461 ASSERT(new_capacity > capacity_);
1462 size_t delta = new_capacity - capacity_;
1463 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
Steve Block44f0eee2011-05-26 01:26:41 +01001464 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1465 high(), delta, executable())) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001466 return false;
1467 }
1468 capacity_ = new_capacity;
1469 return true;
1470}
1471
1472
1473bool SemiSpace::ShrinkTo(int new_capacity) {
1474 ASSERT(new_capacity >= initial_capacity_);
1475 ASSERT(new_capacity < capacity_);
1476 size_t delta = capacity_ - new_capacity;
1477 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
Steve Block44f0eee2011-05-26 01:26:41 +01001478 if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1479 high() - delta, delta)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001480 return false;
1481 }
1482 capacity_ = new_capacity;
1483 return true;
1484}
1485
1486
1487#ifdef DEBUG
1488void SemiSpace::Print() { }
1489
1490
1491void SemiSpace::Verify() { }
1492#endif
1493
1494
1495// -----------------------------------------------------------------------------
1496// SemiSpaceIterator implementation.
1497SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1498 Initialize(space, space->bottom(), space->top(), NULL);
1499}
1500
1501
1502SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1503 HeapObjectCallback size_func) {
1504 Initialize(space, space->bottom(), space->top(), size_func);
1505}
1506
1507
1508SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1509 Initialize(space, start, space->top(), NULL);
1510}
1511
1512
1513void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
1514 Address end,
1515 HeapObjectCallback size_func) {
1516 ASSERT(space->ToSpaceContains(start));
1517 ASSERT(space->ToSpaceLow() <= end
1518 && end <= space->ToSpaceHigh());
1519 space_ = &space->to_space_;
1520 current_ = start;
1521 limit_ = end;
1522 size_func_ = size_func;
1523}
1524
1525
1526#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00001527// heap_histograms is shared; always clear it before using it.
1528static void ClearHistograms() {
Steve Block44f0eee2011-05-26 01:26:41 +01001529 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001530 // We reset the name each time, though it hasn't changed.
Steve Block44f0eee2011-05-26 01:26:41 +01001531#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
Steve Blocka7e24c12009-10-30 11:49:00 +00001532 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1533#undef DEF_TYPE_NAME
1534
Steve Block44f0eee2011-05-26 01:26:41 +01001535#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00001536 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1537#undef CLEAR_HISTOGRAM
1538
Steve Block44f0eee2011-05-26 01:26:41 +01001539 isolate->js_spill_information()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00001540}
1541
1542
Steve Blocka7e24c12009-10-30 11:49:00 +00001543static void ClearCodeKindStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01001544 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001545 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001546 isolate->code_kind_statistics()[i] = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001547 }
1548}
1549
1550
1551static void ReportCodeKindStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01001552 Isolate* isolate = Isolate::Current();
Steve Block6ded16b2010-05-10 14:33:55 +01001553 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
Steve Blocka7e24c12009-10-30 11:49:00 +00001554
1555#define CASE(name) \
1556 case Code::name: table[Code::name] = #name; \
1557 break
1558
1559 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1560 switch (static_cast<Code::Kind>(i)) {
1561 CASE(FUNCTION);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001562 CASE(OPTIMIZED_FUNCTION);
Steve Blocka7e24c12009-10-30 11:49:00 +00001563 CASE(STUB);
1564 CASE(BUILTIN);
1565 CASE(LOAD_IC);
1566 CASE(KEYED_LOAD_IC);
1567 CASE(STORE_IC);
1568 CASE(KEYED_STORE_IC);
1569 CASE(CALL_IC);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001570 CASE(KEYED_CALL_IC);
Ben Murdoch257744e2011-11-30 15:57:28 +00001571 CASE(UNARY_OP_IC);
1572 CASE(BINARY_OP_IC);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001573 CASE(COMPARE_IC);
Steve Blocka7e24c12009-10-30 11:49:00 +00001574 }
1575 }
1576
1577#undef CASE
1578
1579 PrintF("\n Code kind histograms: \n");
1580 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001581 if (isolate->code_kind_statistics()[i] > 0) {
1582 PrintF(" %-20s: %10d bytes\n", table[i],
1583 isolate->code_kind_statistics()[i]);
Steve Blocka7e24c12009-10-30 11:49:00 +00001584 }
1585 }
1586 PrintF("\n");
1587}
1588
1589
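// Record one heap object in the per-isolate histogram: bump the count and
// byte total for its instance type and, for JSObjects when
// FLAG_collect_heap_spill_statistics is set, its spill statistics.  Returns
// the object's size in bytes.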
1590static int CollectHistogramInfo(HeapObject* obj) {
Steve Block44f0eee2011-05-26 01:26:41 +01001591 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001592 InstanceType type = obj->map()->instance_type();
1593 ASSERT(0 <= type && type <= LAST_TYPE);
Steve Block44f0eee2011-05-26 01:26:41 +01001594 ASSERT(isolate->heap_histograms()[type].name() != NULL);
1595 isolate->heap_histograms()[type].increment_number(1);
1596 isolate->heap_histograms()[type].increment_bytes(obj->Size());
Steve Blocka7e24c12009-10-30 11:49:00 +00001597
1598 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
Steve Block44f0eee2011-05-26 01:26:41 +01001599 JSObject::cast(obj)->IncrementSpillStatistics(
1600 isolate->js_spill_information());
Steve Blocka7e24c12009-10-30 11:49:00 +00001601 }
1602
1603 return obj->Size();
1604}
1605
1606
1607static void ReportHistogram(bool print_spill) {
Steve Block44f0eee2011-05-26 01:26:41 +01001608 Isolate* isolate = Isolate::Current();
Steve Blocka7e24c12009-10-30 11:49:00 +00001609 PrintF("\n Object Histogram:\n");
1610 for (int i = 0; i <= LAST_TYPE; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001611 if (isolate->heap_histograms()[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001612 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Block44f0eee2011-05-26 01:26:41 +01001613 isolate->heap_histograms()[i].name(),
1614 isolate->heap_histograms()[i].number(),
1615 isolate->heap_histograms()[i].bytes());
Steve Blocka7e24c12009-10-30 11:49:00 +00001616 }
1617 }
1618 PrintF("\n");
1619
1620 // Summarize string types.
1621 int string_number = 0;
1622 int string_bytes = 0;
1623#define INCREMENT(type, size, name, camel_name) \
Steve Block44f0eee2011-05-26 01:26:41 +01001624 string_number += isolate->heap_histograms()[type].number(); \
1625 string_bytes += isolate->heap_histograms()[type].bytes();
Steve Blocka7e24c12009-10-30 11:49:00 +00001626 STRING_TYPE_LIST(INCREMENT)
1627#undef INCREMENT
1628 if (string_number > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001629 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
Steve Blocka7e24c12009-10-30 11:49:00 +00001630 string_bytes);
1631 }
1632
1633 if (FLAG_collect_heap_spill_statistics && print_spill) {
Steve Block44f0eee2011-05-26 01:26:41 +01001634 isolate->js_spill_information()->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00001635 }
1636}
1637#endif // DEBUG
1638
1639
1640// Support for statistics gathering for --heap-stats and --log-gc.
1641#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1642void NewSpace::ClearHistograms() {
1643 for (int i = 0; i <= LAST_TYPE; i++) {
1644 allocated_histogram_[i].clear();
1645 promoted_histogram_[i].clear();
1646 }
1647}
1648
1649// Because the copying collector does not touch garbage objects, we iterate
1650// the new space before a collection to get a histogram of allocated objects.
1651// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1652 // set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the
1653 // --log-gc flag is set.
1654void NewSpace::CollectStatistics() {
1655 ClearHistograms();
1656 SemiSpaceIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00001657 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1658 RecordAllocation(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001659}
1660
1661
1662#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01001663static void DoReportStatistics(Isolate* isolate,
1664 HistogramInfo* info, const char* description) {
1665 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
Steve Blocka7e24c12009-10-30 11:49:00 +00001666 // Lump all the string types together.
1667 int string_number = 0;
1668 int string_bytes = 0;
1669#define INCREMENT(type, size, name, camel_name) \
1670 string_number += info[type].number(); \
1671 string_bytes += info[type].bytes();
1672 STRING_TYPE_LIST(INCREMENT)
1673#undef INCREMENT
1674 if (string_number > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01001675 LOG(isolate,
1676 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00001677 }
1678
1679 // Then do the other types.
1680 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1681 if (info[i].number() > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01001682 LOG(isolate,
1683 HeapSampleItemEvent(info[i].name(), info[i].number(),
Steve Blocka7e24c12009-10-30 11:49:00 +00001684 info[i].bytes()));
1685 }
1686 }
Steve Block44f0eee2011-05-26 01:26:41 +01001687 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
Steve Blocka7e24c12009-10-30 11:49:00 +00001688}
1689#endif // ENABLE_LOGGING_AND_PROFILING
1690
1691
1692void NewSpace::ReportStatistics() {
1693#ifdef DEBUG
1694 if (FLAG_heap_stats) {
1695 float pct = static_cast<float>(Available()) / Capacity();
Ben Murdochf87a2032010-10-22 12:50:53 +01001696 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1697 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001698 Capacity(), Available(), static_cast<int>(pct*100));
1699 PrintF("\n Object Histogram:\n");
1700 for (int i = 0; i <= LAST_TYPE; i++) {
1701 if (allocated_histogram_[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001702 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001703 allocated_histogram_[i].name(),
1704 allocated_histogram_[i].number(),
1705 allocated_histogram_[i].bytes());
1706 }
1707 }
1708 PrintF("\n");
1709 }
1710#endif // DEBUG
1711
1712#ifdef ENABLE_LOGGING_AND_PROFILING
1713 if (FLAG_log_gc) {
Steve Block44f0eee2011-05-26 01:26:41 +01001714 Isolate* isolate = ISOLATE;
1715 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1716 DoReportStatistics(isolate, promoted_histogram_, "promoted");
Steve Blocka7e24c12009-10-30 11:49:00 +00001717 }
1718#endif // ENABLE_LOGGING_AND_PROFILING
1719}
1720
1721
1722void NewSpace::RecordAllocation(HeapObject* obj) {
1723 InstanceType type = obj->map()->instance_type();
1724 ASSERT(0 <= type && type <= LAST_TYPE);
1725 allocated_histogram_[type].increment_number(1);
1726 allocated_histogram_[type].increment_bytes(obj->Size());
1727}
1728
1729
1730void NewSpace::RecordPromotion(HeapObject* obj) {
1731 InstanceType type = obj->map()->instance_type();
1732 ASSERT(0 <= type && type <= LAST_TYPE);
1733 promoted_histogram_[type].increment_number(1);
1734 promoted_histogram_[type].increment_bytes(obj->Size());
1735}
1736#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1737
1738
1739// -----------------------------------------------------------------------------
1740// Free lists for old object spaces implementation
1741
Steve Block44f0eee2011-05-26 01:26:41 +01001742void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001743 ASSERT(size_in_bytes > 0);
1744 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1745
1746 // We write a map and possibly size information to the block. If the block
1747 // is big enough to be a ByteArray with at least one extra word (the next
1748 // pointer), we set its map to the byte array map and its length so that
1749 // HeapObject::Size() returns the desired block size.
1750 // If the block is too small (e.g., one or two words) to hold both a size
1751 // field and a next pointer, we give it a filler map that gives it the
1752 // correct size.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001753 if (size_in_bytes > ByteArray::kHeaderSize) {
Steve Block44f0eee2011-05-26 01:26:41 +01001754 set_map(heap->raw_unchecked_byte_array_map());
Steve Blockd0582a62009-12-15 09:54:21 +00001755 // Can't use ByteArray::cast because it fails during deserialization.
1756 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1757 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00001758 } else if (size_in_bytes == kPointerSize) {
Steve Block44f0eee2011-05-26 01:26:41 +01001759 set_map(heap->raw_unchecked_one_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00001760 } else if (size_in_bytes == 2 * kPointerSize) {
Steve Block44f0eee2011-05-26 01:26:41 +01001761 set_map(heap->raw_unchecked_two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00001762 } else {
1763 UNREACHABLE();
1764 }
Steve Blockd0582a62009-12-15 09:54:21 +00001765 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1766 // deserialization because the byte array map is not done yet.
Steve Blocka7e24c12009-10-30 11:49:00 +00001767}
1768
1769
Steve Block44f0eee2011-05-26 01:26:41 +01001770Address FreeListNode::next(Heap* heap) {
Steve Block3ce2e202009-11-05 08:53:23 +00001771 ASSERT(IsFreeListNode(this));
Steve Block44f0eee2011-05-26 01:26:41 +01001772 if (map() == heap->raw_unchecked_byte_array_map()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001773 ASSERT(Size() >= kNextOffset + kPointerSize);
1774 return Memory::Address_at(address() + kNextOffset);
1775 } else {
1776 return Memory::Address_at(address() + kPointerSize);
1777 }
1778}
1779
1780
Steve Block44f0eee2011-05-26 01:26:41 +01001781void FreeListNode::set_next(Heap* heap, Address next) {
Steve Block3ce2e202009-11-05 08:53:23 +00001782 ASSERT(IsFreeListNode(this));
Steve Block44f0eee2011-05-26 01:26:41 +01001783 if (map() == heap->raw_unchecked_byte_array_map()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001784 ASSERT(Size() >= kNextOffset + kPointerSize);
1785 Memory::Address_at(address() + kNextOffset) = next;
1786 } else {
1787 Memory::Address_at(address() + kPointerSize) = next;
1788 }
1789}
1790
1791
Steve Block44f0eee2011-05-26 01:26:41 +01001792OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
1793 : heap_(heap),
1794 owner_(owner) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001795 Reset();
1796}
1797
1798
1799void OldSpaceFreeList::Reset() {
1800 available_ = 0;
1801 for (int i = 0; i < kFreeListsLength; i++) {
1802 free_[i].head_node_ = NULL;
1803 }
1804 needs_rebuild_ = false;
1805 finger_ = kHead;
1806 free_[kHead].next_size_ = kEnd;
1807}
1808
1809
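// The size list threads the non-empty free lists together through their
// next_size_ fields so that the best-fit search in Allocate() can skip empty
// buckets.  It is rebuilt lazily after new blocks have been freed.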
1810void OldSpaceFreeList::RebuildSizeList() {
1811 ASSERT(needs_rebuild_);
1812 int cur = kHead;
1813 for (int i = cur + 1; i < kFreeListsLength; i++) {
1814 if (free_[i].head_node_ != NULL) {
1815 free_[cur].next_size_ = i;
1816 cur = i;
1817 }
1818 }
1819 free_[cur].next_size_ = kEnd;
1820 needs_rebuild_ = false;
1821}
1822
1823
1824int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1825#ifdef DEBUG
Steve Block44f0eee2011-05-26 01:26:41 +01001826 Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001827#endif
1828 FreeListNode* node = FreeListNode::FromAddress(start);
Steve Block44f0eee2011-05-26 01:26:41 +01001829 node->set_size(heap_, size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001830
1831 // We don't use the freelists in compacting mode. This makes it more like a
1832 // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1833 // collector.
1834 if (FLAG_always_compact) {
1835 return size_in_bytes;
1836 }
1837
1838 // Early return to drop too-small blocks on the floor (one or two word
1839 // blocks cannot hold a map pointer, a size field, and a pointer to the
1840 // next block in the free list).
1841 if (size_in_bytes < kMinBlockSize) {
1842 return size_in_bytes;
1843 }
1844
1845 // Insert other blocks at the head of an exact free list.
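// The free lists are segregated by exact size in words; e.g. with 4-byte
// pointers a 64-byte block is chained into free_[16].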
1846 int index = size_in_bytes >> kPointerSizeLog2;
Steve Block44f0eee2011-05-26 01:26:41 +01001847 node->set_next(heap_, free_[index].head_node_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001848 free_[index].head_node_ = node->address();
1849 available_ += size_in_bytes;
1850 needs_rebuild_ = true;
1851 return 0;
1852}
1853
1854
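// Allocate a block of at least 'size_in_bytes' from the free lists.  An
// exact-size list is tried first; otherwise the smallest sufficiently large
// list is used, the requested size is carved off its head node, and the
// remainder is either re-inserted into the free lists or, if it is too small
// to hold a free-list node, reported via 'wasted_bytes'.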
John Reck59135872010-11-02 12:39:01 -07001855MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001856 ASSERT(0 < size_in_bytes);
1857 ASSERT(size_in_bytes <= kMaxBlockSize);
1858 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1859
1860 if (needs_rebuild_) RebuildSizeList();
1861 int index = size_in_bytes >> kPointerSizeLog2;
1862 // Check for a perfect fit.
1863 if (free_[index].head_node_ != NULL) {
1864 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1865 // If this was the last block of its size, remove the size.
Steve Block44f0eee2011-05-26 01:26:41 +01001866 if ((free_[index].head_node_ = node->next(heap_)) == NULL)
1867 RemoveSize(index);
Steve Blocka7e24c12009-10-30 11:49:00 +00001868 available_ -= size_in_bytes;
1869 *wasted_bytes = 0;
1870 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1871 return node;
1872 }
1873 // Search the size list for the best fit.
1874 int prev = finger_ < index ? finger_ : kHead;
1875 int cur = FindSize(index, &prev);
1876 ASSERT(index < cur);
1877 if (cur == kEnd) {
1878 // No large enough size in list.
1879 *wasted_bytes = 0;
Ben Murdochf87a2032010-10-22 12:50:53 +01001880 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001881 }
1882 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1883 int rem = cur - index;
1884 int rem_bytes = rem << kPointerSizeLog2;
1885 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
1886 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
1887 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1888 size_in_bytes);
1889 // Distinguish the cases prev < rem < cur and rem <= prev < cur
1890 // to avoid many redundant tests and calls to Insert/RemoveSize.
1891 if (prev < rem) {
1892 // Simple case: insert rem between prev and cur.
1893 finger_ = prev;
1894 free_[prev].next_size_ = rem;
1895 // If this was the last block of size cur, remove the size.
Steve Block44f0eee2011-05-26 01:26:41 +01001896 if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001897 free_[rem].next_size_ = free_[cur].next_size_;
1898 } else {
1899 free_[rem].next_size_ = cur;
1900 }
1901 // Add the remainder block.
Steve Block44f0eee2011-05-26 01:26:41 +01001902 rem_node->set_size(heap_, rem_bytes);
1903 rem_node->set_next(heap_, free_[rem].head_node_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001904 free_[rem].head_node_ = rem_node->address();
1905 } else {
1906 // If this was the last block of size cur, remove the size.
Steve Block44f0eee2011-05-26 01:26:41 +01001907 if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001908 finger_ = prev;
1909 free_[prev].next_size_ = free_[cur].next_size_;
1910 }
1911 if (rem_bytes < kMinBlockSize) {
1912 // Too-small remainder is wasted.
Steve Block44f0eee2011-05-26 01:26:41 +01001913 rem_node->set_size(heap_, rem_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001914 available_ -= size_in_bytes + rem_bytes;
1915 *wasted_bytes = rem_bytes;
1916 return cur_node;
1917 }
1918 // Add the remainder block and, if needed, insert its size.
Steve Block44f0eee2011-05-26 01:26:41 +01001919 rem_node->set_size(heap_, rem_bytes);
1920 rem_node->set_next(heap_, free_[rem].head_node_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001921 free_[rem].head_node_ = rem_node->address();
Steve Block44f0eee2011-05-26 01:26:41 +01001922 if (rem_node->next(heap_) == NULL) InsertSize(rem);
Steve Blocka7e24c12009-10-30 11:49:00 +00001923 }
1924 available_ -= size_in_bytes;
1925 *wasted_bytes = 0;
1926 return cur_node;
1927}
1928
1929
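// Walk every free list and set the mark bit on each node in it.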
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001930void OldSpaceFreeList::MarkNodes() {
1931 for (int i = 0; i < kFreeListsLength; i++) {
1932 Address cur_addr = free_[i].head_node_;
1933 while (cur_addr != NULL) {
1934 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
Steve Block44f0eee2011-05-26 01:26:41 +01001935 cur_addr = cur_node->next(heap_);
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001936 cur_node->SetMark();
1937 }
1938 }
1939}
1940
1941
Steve Blocka7e24c12009-10-30 11:49:00 +00001942#ifdef DEBUG
1943bool OldSpaceFreeList::Contains(FreeListNode* node) {
1944 for (int i = 0; i < kFreeListsLength; i++) {
1945 Address cur_addr = free_[i].head_node_;
1946 while (cur_addr != NULL) {
1947 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1948 if (cur_node == node) return true;
Steve Block44f0eee2011-05-26 01:26:41 +01001949 cur_addr = cur_node->next(heap_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001950 }
1951 }
1952 return false;
1953}
1954#endif
1955
1956
Steve Block44f0eee2011-05-26 01:26:41 +01001957FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
1958 AllocationSpace owner,
1959 int object_size)
1960 : heap_(heap), owner_(owner), object_size_(object_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001961 Reset();
1962}
1963
1964
1965void FixedSizeFreeList::Reset() {
1966 available_ = 0;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001967 head_ = tail_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001968}
1969
1970
1971void FixedSizeFreeList::Free(Address start) {
1972#ifdef DEBUG
Steve Block44f0eee2011-05-26 01:26:41 +01001973 Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001974#endif
Leon Clarkee46be812010-01-19 14:06:41 +00001975 // We only use the freelists with mark-sweep.
Steve Block44f0eee2011-05-26 01:26:41 +01001976 ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
Steve Blocka7e24c12009-10-30 11:49:00 +00001977 FreeListNode* node = FreeListNode::FromAddress(start);
Steve Block44f0eee2011-05-26 01:26:41 +01001978 node->set_size(heap_, object_size_);
1979 node->set_next(heap_, NULL);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001980 if (head_ == NULL) {
1981 tail_ = head_ = node->address();
1982 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001983 FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001984 tail_ = node->address();
1985 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001986 available_ += object_size_;
1987}
1988
1989
John Reck59135872010-11-02 12:39:01 -07001990MaybeObject* FixedSizeFreeList::Allocate() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001991 if (head_ == NULL) {
Ben Murdochf87a2032010-10-22 12:50:53 +01001992 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001993 }
1994
1995 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1996 FreeListNode* node = FreeListNode::FromAddress(head_);
Steve Block44f0eee2011-05-26 01:26:41 +01001997 head_ = node->next(heap_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001998 available_ -= object_size_;
1999 return node;
2000}
2001
2002
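// Walk the free list from head_ towards tail_, setting the mark bit on each
// node visited (the tail node itself is not marked by this loop).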
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002003void FixedSizeFreeList::MarkNodes() {
2004 Address cur_addr = head_;
2005 while (cur_addr != NULL && cur_addr != tail_) {
2006 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
Steve Block44f0eee2011-05-26 01:26:41 +01002007 cur_addr = cur_node->next(heap_);
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002008 cur_node->SetMark();
2009 }
2010}
2011
2012
Steve Blocka7e24c12009-10-30 11:49:00 +00002013// -----------------------------------------------------------------------------
2014// OldSpace implementation
2015
2016void OldSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01002017 // Call prepare of the super class.
2018 PagedSpace::PrepareForMarkCompact(will_compact);
2019
Steve Blocka7e24c12009-10-30 11:49:00 +00002020 if (will_compact) {
2021 // Reset relocation info. During a compacting collection, everything in
2022 // the space is considered 'available' and we will rediscover live data
2023 // and waste during the collection.
2024 MCResetRelocationInfo();
2025 ASSERT(Available() == Capacity());
2026 } else {
2027 // During a non-compacting collection, everything below the linear
2028 // allocation pointer is considered allocated (everything above is
2029 // available) and we will rediscover available and wasted bytes during
2030 // the collection.
2031 accounting_stats_.AllocateBytes(free_list_.available());
2032 accounting_stats_.FillWastedBytes(Waste());
2033 }
2034
2035 // Clear the free list before a full GC---it will be rebuilt afterward.
2036 free_list_.Reset();
2037}
2038
2039
2040void OldSpace::MCCommitRelocationInfo() {
2041 // Update fast allocation info.
2042 allocation_info_.top = mc_forwarding_info_.top;
2043 allocation_info_.limit = mc_forwarding_info_.limit;
2044 ASSERT(allocation_info_.VerifyPagedAllocation());
2045
2046 // The space is compacted and we haven't yet built free lists or
2047 // wasted any space.
2048 ASSERT(Waste() == 0);
2049 ASSERT(AvailableFree() == 0);
2050
2051 // Build the free list for the space.
2052 int computed_size = 0;
2053 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2054 while (it.has_next()) {
2055 Page* p = it.next();
2056 // Space below the relocation pointer is allocated.
Steve Blockd0582a62009-12-15 09:54:21 +00002057 computed_size +=
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002058 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00002059 if (it.has_next()) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002060 // Free the space at the top of the page.
Steve Blockd0582a62009-12-15 09:54:21 +00002061 int extra_size =
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002062 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
Steve Blocka7e24c12009-10-30 11:49:00 +00002063 if (extra_size > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002064 int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
2065 extra_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002066 // The bytes we have just "freed" to add to the free list were
2067 // already accounted as available.
2068 accounting_stats_.WasteBytes(wasted_bytes);
2069 }
2070 }
2071 }
2072
2073 // Make sure the computed size - based on the used portion of the pages in
2074 // use - matches the size obtained while computing forwarding addresses.
2075 ASSERT(computed_size == Size());
2076}
2077
2078
Leon Clarkee46be812010-01-19 14:06:41 +00002079bool NewSpace::ReserveSpace(int bytes) {
2080 // We can't reliably unpack a partial snapshot that needs more new space
2081 // than the minimum NewSpace size.
2082 ASSERT(bytes <= InitialCapacity());
2083 Address limit = allocation_info_.limit;
2084 Address top = allocation_info_.top;
2085 return limit - top >= bytes;
2086}
2087
2088
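// Unlink the chain of pages after 'prev' (or from the first page if 'prev' is
// NULL) up to and including 'last', reattach it at the end of the page list,
// and reset the watermarks and region marks of the moved pages.  After this
// the page order may no longer match the chunk order.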
Steve Block6ded16b2010-05-10 14:33:55 +01002089void PagedSpace::FreePages(Page* prev, Page* last) {
2090 if (last == AllocationTopPage()) {
2091 // Pages are already at the end of used pages.
2092 return;
2093 }
2094
2095 Page* first = NULL;
2096
2097 // Remove pages from the list.
2098 if (prev == NULL) {
2099 first = first_page_;
2100 first_page_ = last->next_page();
2101 } else {
2102 first = prev->next_page();
Steve Block44f0eee2011-05-26 01:26:41 +01002103 heap()->isolate()->memory_allocator()->SetNextPage(
2104 prev, last->next_page());
Steve Block6ded16b2010-05-10 14:33:55 +01002105 }
2106
2107 // Attach it after the last page.
Steve Block44f0eee2011-05-26 01:26:41 +01002108 heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
Steve Block6ded16b2010-05-10 14:33:55 +01002109 last_page_ = last;
Steve Block44f0eee2011-05-26 01:26:41 +01002110 heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
Steve Block6ded16b2010-05-10 14:33:55 +01002111
2112 // Clean them up.
2113 do {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002114 first->InvalidateWatermark(true);
2115 first->SetAllocationWatermark(first->ObjectAreaStart());
2116 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
2117 first->SetRegionMarks(Page::kAllRegionsCleanMarks);
Steve Block6ded16b2010-05-10 14:33:55 +01002118 first = first->next_page();
2119 } while (first != NULL);
2120
2121 // Order of pages in this space might no longer be consistent with
2122 // order of pages in chunks.
2123 page_list_is_chunk_ordered_ = false;
2124}
2125
2126
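// Restore the page list to chunk order.  Pages that end up in the middle of
// the used part of the space after reordering are either returned to the
// free list or overwritten with filler objects, depending on
// 'deallocate_blocks', and the allocation top is moved to the new last page
// in use.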
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002127void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
2128 const bool add_to_freelist = true;
2129
2130 // Mark used and unused pages to properly fill unused pages
2131 // after reordering.
2132 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
2133 Page* last_in_use = AllocationTopPage();
2134 bool in_use = true;
2135
2136 while (all_pages_iterator.has_next()) {
2137 Page* p = all_pages_iterator.next();
2138 p->SetWasInUseBeforeMC(in_use);
2139 if (p == last_in_use) {
2140 // We passed the page containing the allocation top. All subsequent
2141 // pages are unused.
2142 in_use = false;
2143 }
2144 }
2145
2146 if (page_list_is_chunk_ordered_) return;
2147
2148 Page* new_last_in_use = Page::FromAddress(NULL);
Steve Block44f0eee2011-05-26 01:26:41 +01002149 heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
2150 this, &first_page_, &last_page_, &new_last_in_use);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002151 ASSERT(new_last_in_use->is_valid());
2152
2153 if (new_last_in_use != last_in_use) {
2154 // Current allocation top points to a page which is now in the middle
2155 // of page list. We should move allocation top forward to the new last
2156 // used page so various object iterators will continue to work properly.
2157 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2158 last_in_use->AllocationTop());
2159
2160 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2161 if (size_in_bytes > 0) {
2162 Address start = last_in_use->AllocationTop();
2163 if (deallocate_blocks) {
2164 accounting_stats_.AllocateBytes(size_in_bytes);
2165 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2166 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01002167 heap()->CreateFillerObjectAt(start, size_in_bytes);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002168 }
2169 }
2170
2171 // The new last-in-use page was in the middle of the list before
2172 // sorting, so it is full.
2173 SetTop(new_last_in_use->AllocationTop());
2174
2175 ASSERT(AllocationTopPage() == new_last_in_use);
2176 ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2177 }
2178
2179 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2180 while (pages_in_use_iterator.has_next()) {
2181 Page* p = pages_in_use_iterator.next();
2182 if (!p->WasInUseBeforeMC()) {
2183 // Empty page is in the middle of a sequence of used pages.
2184 // Allocate it as a whole and deallocate immediately.
2185 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2186 p->ObjectAreaStart());
2187
2188 p->SetAllocationWatermark(p->ObjectAreaStart());
2189 Address start = p->ObjectAreaStart();
2190 if (deallocate_blocks) {
2191 accounting_stats_.AllocateBytes(size_in_bytes);
2192 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2193 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01002194 heap()->CreateFillerObjectAt(start, size_in_bytes);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002195 }
2196 }
2197 }
2198
2199 page_list_is_chunk_ordered_ = true;
2200}
2201
2202
Steve Block6ded16b2010-05-10 14:33:55 +01002203void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2204 if (will_compact) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002205 RelinkPageListInChunkOrder(false);
Steve Block6ded16b2010-05-10 14:33:55 +01002206 }
2207}
2208
2209
Leon Clarkee46be812010-01-19 14:06:41 +00002210bool PagedSpace::ReserveSpace(int bytes) {
2211 Address limit = allocation_info_.limit;
2212 Address top = allocation_info_.top;
2213 if (limit - top >= bytes) return true;
2214
2215 // There wasn't enough space in the current page. Let's put the rest
2216 // of the page on the free list and start a fresh page.
2217 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2218
2219 Page* reserved_page = TopPageOf(allocation_info_);
2220 int bytes_left_to_reserve = bytes;
2221 while (bytes_left_to_reserve > 0) {
2222 if (!reserved_page->next_page()->is_valid()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002223 if (heap()->OldGenerationAllocationLimitReached()) return false;
Leon Clarkee46be812010-01-19 14:06:41 +00002224 Expand(reserved_page);
2225 }
2226 bytes_left_to_reserve -= Page::kPageSize;
2227 reserved_page = reserved_page->next_page();
2228 if (!reserved_page->is_valid()) return false;
2229 }
2230 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002231 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
Leon Clarkee46be812010-01-19 14:06:41 +00002232 SetAllocationInfo(&allocation_info_,
2233 TopPageOf(allocation_info_)->next_page());
2234 return true;
2235}
2236
2237
2238// You have to call this last, since the implementation from PagedSpace
2239// doesn't know that memory was 'promised' to large object space.
2240bool LargeObjectSpace::ReserveSpace(int bytes) {
Steve Block44f0eee2011-05-26 01:26:41 +01002241 return heap()->OldGenerationSpaceAvailable() >= bytes;
Leon Clarkee46be812010-01-19 14:06:41 +00002242}
2243
2244
Steve Blocka7e24c12009-10-30 11:49:00 +00002245// Slow case for normal allocation. Try in order: (1) allocate in the next
2246// page in the space, (2) allocate off the space's free list, (3) expand the
2247// space, (4) fail.
2248HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2249 // Linear allocation in this space has failed. If there is another page
2250 // in the space, move to that page and allocate there. This allocation
2251 // should succeed (size_in_bytes should not be greater than a page's
2252 // object area size).
2253 Page* current_page = TopPageOf(allocation_info_);
2254 if (current_page->next_page()->is_valid()) {
2255 return AllocateInNextPage(current_page, size_in_bytes);
2256 }
2257
Steve Blockd0582a62009-12-15 09:54:21 +00002258 // There is no next page in this space. Try free list allocation unless that
2259 // is currently forbidden.
Steve Block44f0eee2011-05-26 01:26:41 +01002260 if (!heap()->linear_allocation()) {
Steve Blockd0582a62009-12-15 09:54:21 +00002261 int wasted_bytes;
John Reck59135872010-11-02 12:39:01 -07002262 Object* result;
2263 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
Steve Blockd0582a62009-12-15 09:54:21 +00002264 accounting_stats_.WasteBytes(wasted_bytes);
John Reck59135872010-11-02 12:39:01 -07002265 if (maybe->ToObject(&result)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002266 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002267
2268 HeapObject* obj = HeapObject::cast(result);
2269 Page* p = Page::FromAddress(obj->address());
2270
2271 if (obj->address() >= p->AllocationWatermark()) {
2272 // There should be no hole between the allocation watermark
2273 // and allocated object address.
2274 // Memory above the allocation watermark was not swept and
2275 // might contain garbage pointers to new space.
2276 ASSERT(obj->address() == p->AllocationWatermark());
2277 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2278 }
2279
2280 return obj;
Steve Blockd0582a62009-12-15 09:54:21 +00002281 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002282 }
2283
2284 // Free list allocation failed and there is no next page. Fail if we have
2285 // hit the old generation size limit that should cause a garbage
2286 // collection.
Steve Block44f0eee2011-05-26 01:26:41 +01002287 if (!heap()->always_allocate() &&
2288 heap()->OldGenerationAllocationLimitReached()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002289 return NULL;
2290 }
2291
2292 // Try to expand the space and allocate in the new next page.
2293 ASSERT(!current_page->next_page()->is_valid());
2294 if (Expand(current_page)) {
2295 return AllocateInNextPage(current_page, size_in_bytes);
2296 }
2297
2298 // Finally, fail.
2299 return NULL;
2300}
2301
2302
Leon Clarkee46be812010-01-19 14:06:41 +00002303void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002304 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blockd0582a62009-12-15 09:54:21 +00002305 int free_size =
2306 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002307 if (free_size > 0) {
2308 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2309 accounting_stats_.WasteBytes(wasted_bytes);
2310 }
Leon Clarkee46be812010-01-19 14:06:41 +00002311}
2312
2313
2314void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002315 current_page->SetAllocationWatermark(allocation_info_.top);
Leon Clarkee46be812010-01-19 14:06:41 +00002316 int free_size =
2317 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2318 // In the fixed space free list all the free list items have the right size.
2319 // We use up the rest of the page while preserving this invariant.
2320 while (free_size >= object_size_in_bytes_) {
2321 free_list_.Free(allocation_info_.top);
2322 allocation_info_.top += object_size_in_bytes_;
2323 free_size -= object_size_in_bytes_;
2324 accounting_stats_.WasteBytes(object_size_in_bytes_);
2325 }
2326}
2327
2328
2329// Add the block at the top of the page to the space's free list, set the
2330 // allocation info to the next page (which is assumed to exist), and allocate
2331// linearly there.
2332HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2333 int size_in_bytes) {
2334 ASSERT(current_page->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002335 Page* next_page = current_page->next_page();
2336 next_page->ClearGCFields();
Leon Clarkee46be812010-01-19 14:06:41 +00002337 PutRestOfCurrentPageOnFreeList(current_page);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002338 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002339 return AllocateLinearly(&allocation_info_, size_in_bytes);
2340}
2341
2342
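// In the old space a free block of any size can be returned to the free list
// in one piece (contrast with FixedSpace::DeallocateBlock below, which has to
// split the block into fixed-size chunks).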
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002343void OldSpace::DeallocateBlock(Address start,
2344 int size_in_bytes,
2345 bool add_to_freelist) {
2346 Free(start, size_in_bytes, add_to_freelist);
2347}
2348
2349
Steve Blocka7e24c12009-10-30 11:49:00 +00002350#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00002351void PagedSpace::ReportCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002352 Isolate* isolate = Isolate::Current();
2353 CommentStatistic* comments_statistics =
2354 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002355 ReportCodeKindStatistics();
2356 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2357 "count (average)\"):\n");
Steve Block44f0eee2011-05-26 01:26:41 +01002358 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002359 const CommentStatistic& cs = comments_statistics[i];
2360 if (cs.size > 0) {
2361 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2362 cs.size/cs.count);
2363 }
2364 }
2365 PrintF("\n");
2366}
2367
2368
2369void PagedSpace::ResetCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002370 Isolate* isolate = Isolate::Current();
2371 CommentStatistic* comments_statistics =
2372 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002373 ClearCodeKindStatistics();
Steve Block44f0eee2011-05-26 01:26:41 +01002374 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2375 comments_statistics[i].Clear();
2376 }
2377 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2378 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2379 comments_statistics[CommentStatistic::kMaxComments].count = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002380}
2381
2382
Steve Block44f0eee2011-05-26 01:26:41 +01002383// Adds a comment to the 'comments_statistics' table. Performance is OK as
Steve Blocka7e24c12009-10-30 11:49:00 +00002384// long as 'kMaxComments' is small.
Steve Block44f0eee2011-05-26 01:26:41 +01002385static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2386 CommentStatistic* comments_statistics =
2387 isolate->paged_space_comments_statistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002388 // Do not count empty comments
2389 if (delta <= 0) return;
Steve Block44f0eee2011-05-26 01:26:41 +01002390 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
Steve Blocka7e24c12009-10-30 11:49:00 +00002391 // Search for a free or matching entry in 'comments_statistics': 'cs'
2392 // points to result.
Steve Block44f0eee2011-05-26 01:26:41 +01002393 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002394 if (comments_statistics[i].comment == NULL) {
2395 cs = &comments_statistics[i];
2396 cs->comment = comment;
2397 break;
2398 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2399 cs = &comments_statistics[i];
2400 break;
2401 }
2402 }
2403 // Update entry for 'comment'
2404 cs->size += delta;
2405 cs->count += 1;
2406}
2407
2408
2409// Call for each nested comment start (start marked with '[ xxx', end marked
2410 // with ']'). RelocIterator 'it' must point to a comment reloc info.
Steve Block44f0eee2011-05-26 01:26:41 +01002411static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002412 ASSERT(!it->done());
2413 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2414 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2415 if (tmp[0] != '[') {
2416 // Not a nested comment; skip
2417 return;
2418 }
2419
2420 // Search for end of nested comment or a new nested comment
2421 const char* const comment_txt =
2422 reinterpret_cast<const char*>(it->rinfo()->data());
2423 const byte* prev_pc = it->rinfo()->pc();
2424 int flat_delta = 0;
2425 it->next();
2426 while (true) {
2427 // All nested comments must be terminated properly, so this loop is
2428 // guaranteed to exit.
2429 ASSERT(!it->done());
2430 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2431 const char* const txt =
2432 reinterpret_cast<const char*>(it->rinfo()->data());
Steve Blockd0582a62009-12-15 09:54:21 +00002433 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002434 if (txt[0] == ']') break; // End of nested comment
2435 // A new comment
Steve Block44f0eee2011-05-26 01:26:41 +01002436 CollectCommentStatistics(isolate, it);
Steve Blocka7e24c12009-10-30 11:49:00 +00002437 // Skip code that was covered with previous comment
2438 prev_pc = it->rinfo()->pc();
2439 }
2440 it->next();
2441 }
Steve Block44f0eee2011-05-26 01:26:41 +01002442 EnterComment(isolate, comment_txt, flat_delta);
Steve Blocka7e24c12009-10-30 11:49:00 +00002443}
2444
2445
2446// Collects code size statistics:
2447// - by code kind
2448// - by code comment
2449void PagedSpace::CollectCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01002450 Isolate* isolate = heap()->isolate();
Steve Blocka7e24c12009-10-30 11:49:00 +00002451 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002452 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002453 if (obj->IsCode()) {
2454 Code* code = Code::cast(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01002455 isolate->code_kind_statistics()[code->kind()] += code->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00002456 RelocIterator it(code);
2457 int delta = 0;
2458 const byte* prev_pc = code->instruction_start();
2459 while (!it.done()) {
2460 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
Steve Blockd0582a62009-12-15 09:54:21 +00002461 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
Steve Block44f0eee2011-05-26 01:26:41 +01002462 CollectCommentStatistics(isolate, &it);
Steve Blocka7e24c12009-10-30 11:49:00 +00002463 prev_pc = it.rinfo()->pc();
2464 }
2465 it.next();
2466 }
2467
2468 ASSERT(code->instruction_start() <= prev_pc &&
Leon Clarkeac952652010-07-15 11:15:24 +01002469 prev_pc <= code->instruction_end());
2470 delta += static_cast<int>(code->instruction_end() - prev_pc);
Steve Block44f0eee2011-05-26 01:26:41 +01002471 EnterComment(isolate, "NoComment", delta);
Steve Blocka7e24c12009-10-30 11:49:00 +00002472 }
2473 }
2474}
2475
2476
2477void OldSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002478 int pct = static_cast<int>(Available() * 100 / Capacity());
2479 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2480 ", waste: %" V8_PTR_PREFIX "d"
2481 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002482 Capacity(), Waste(), Available(), pct);
2483
Steve Blocka7e24c12009-10-30 11:49:00 +00002484 ClearHistograms();
2485 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002486 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2487 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002488 ReportHistogram(true);
2489}
Steve Blocka7e24c12009-10-30 11:49:00 +00002490#endif
2491
2492// -----------------------------------------------------------------------------
2493// FixedSpace implementation
2494
2495void FixedSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01002496 // Call prepare of the super class.
2497 PagedSpace::PrepareForMarkCompact(will_compact);
2498
Steve Blocka7e24c12009-10-30 11:49:00 +00002499 if (will_compact) {
2500 // Reset relocation info.
2501 MCResetRelocationInfo();
2502
2503 // During a compacting collection, everything in the space is considered
2504 // 'available' (set by the call to MCResetRelocationInfo) and we will
2505 // rediscover live and wasted bytes during the collection.
2506 ASSERT(Available() == Capacity());
2507 } else {
2508 // During a non-compacting collection, everything below the linear
2509 // allocation pointer except wasted top-of-page blocks is considered
2510 // allocated and we will rediscover available bytes during the
2511 // collection.
2512 accounting_stats_.AllocateBytes(free_list_.available());
2513 }
2514
2515 // Clear the free list before a full GC---it will be rebuilt afterward.
2516 free_list_.Reset();
2517}
2518
2519
2520void FixedSpace::MCCommitRelocationInfo() {
2521 // Update fast allocation info.
2522 allocation_info_.top = mc_forwarding_info_.top;
2523 allocation_info_.limit = mc_forwarding_info_.limit;
2524 ASSERT(allocation_info_.VerifyPagedAllocation());
2525
2526 // The space is compacted and we haven't yet wasted any space.
2527 ASSERT(Waste() == 0);
2528
2529 // Update allocation_top of each page in use and compute waste.
2530 int computed_size = 0;
2531 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2532 while (it.has_next()) {
2533 Page* page = it.next();
2534 Address page_top = page->AllocationTop();
Steve Blockd0582a62009-12-15 09:54:21 +00002535 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00002536 if (it.has_next()) {
Steve Blockd0582a62009-12-15 09:54:21 +00002537 accounting_stats_.WasteBytes(
2538 static_cast<int>(page->ObjectAreaEnd() - page_top));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002539 page->SetAllocationWatermark(page_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002540 }
2541 }
2542
2543 // Make sure the computed size - based on the used portion of the
2544 // pages in use - matches the size we adjust during allocation.
2545 ASSERT(computed_size == Size());
2546}
2547
2548
2549// Slow case for normal allocation. Try in order: (1) allocate in the next
2550// page in the space, (2) allocate off the space's free list, (3) expand the
2551// space, (4) fail.
2552HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2553 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2554 // Linear allocation in this space has failed. If there is another page
2555 // in the space, move to that page and allocate there. This allocation
2556 // should succeed.
2557 Page* current_page = TopPageOf(allocation_info_);
2558 if (current_page->next_page()->is_valid()) {
2559 return AllocateInNextPage(current_page, size_in_bytes);
2560 }
2561
Steve Blockd0582a62009-12-15 09:54:21 +00002562 // There is no next page in this space. Try free list allocation unless
2563 // that is currently forbidden. The fixed space free list implicitly assumes
2564 // that all free blocks are of the fixed size.
Steve Block44f0eee2011-05-26 01:26:41 +01002565 if (!heap()->linear_allocation()) {
John Reck59135872010-11-02 12:39:01 -07002566 Object* result;
2567 MaybeObject* maybe = free_list_.Allocate();
2568 if (maybe->ToObject(&result)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002569 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002570 HeapObject* obj = HeapObject::cast(result);
2571 Page* p = Page::FromAddress(obj->address());
2572
2573 if (obj->address() >= p->AllocationWatermark()) {
2574 // There should be no hole between the allocation watermark
2575 // and allocated object address.
2576 // Memory above the allocation watermark was not swept and
2577 // might contain garbage pointers to new space.
2578 ASSERT(obj->address() == p->AllocationWatermark());
2579 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2580 }
2581
2582 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00002583 }
2584 }
2585
2586 // Free list allocation failed and there is no next page. Fail if we have
2587 // hit the old generation size limit that should cause a garbage
2588 // collection.
Steve Block44f0eee2011-05-26 01:26:41 +01002589 if (!heap()->always_allocate() &&
2590 heap()->OldGenerationAllocationLimitReached()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002591 return NULL;
2592 }
2593
2594 // Try to expand the space and allocate in the new next page.
2595 ASSERT(!current_page->next_page()->is_valid());
2596 if (Expand(current_page)) {
2597 return AllocateInNextPage(current_page, size_in_bytes);
2598 }
2599
2600 // Finally, fail.
2601 return NULL;
2602}
2603
2604
2605 // Move to the next page (which is assumed to exist) and allocate there.
2606 // The top-of-page block is always wasted, because it is too small to hold a
2607// map.
2608HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2609 int size_in_bytes) {
2610 ASSERT(current_page->next_page()->is_valid());
Steve Block6ded16b2010-05-10 14:33:55 +01002611 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
Steve Blocka7e24c12009-10-30 11:49:00 +00002612 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002613 Page* next_page = current_page->next_page();
2614 next_page->ClearGCFields();
2615 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002616 accounting_stats_.WasteBytes(page_extra_);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002617 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002618 return AllocateLinearly(&allocation_info_, size_in_bytes);
2619}
2620
2621
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002622void FixedSpace::DeallocateBlock(Address start,
2623 int size_in_bytes,
2624 bool add_to_freelist) {
2625 // Free-list elements in fixed space are assumed to have a fixed size.
2626 // We break the free block into chunks and add them to the free list
2627 // individually.
2628 int size = object_size_in_bytes();
2629 ASSERT(size_in_bytes % size == 0);
2630 Address end = start + size_in_bytes;
2631 for (Address a = start; a < end; a += size) {
2632 Free(a, add_to_freelist);
2633 }
2634}
2635
2636
Steve Blocka7e24c12009-10-30 11:49:00 +00002637#ifdef DEBUG
2638void FixedSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002639 int pct = static_cast<int>(Available() * 100 / Capacity());
2640 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2641 ", waste: %" V8_PTR_PREFIX "d"
2642 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002643 Capacity(), Waste(), Available(), pct);
2644
Steve Blocka7e24c12009-10-30 11:49:00 +00002645 ClearHistograms();
2646 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002647 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2648 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002649 ReportHistogram(false);
2650}
Steve Blocka7e24c12009-10-30 11:49:00 +00002651#endif
2652
2653
2654// -----------------------------------------------------------------------------
2655// MapSpace implementation
2656
2657void MapSpace::PrepareForMarkCompact(bool will_compact) {
2658 // Call prepare of the super class.
2659 FixedSpace::PrepareForMarkCompact(will_compact);
2660
2661 if (will_compact) {
2662 // Initialize map index entry.
2663 int page_count = 0;
2664 PageIterator it(this, PageIterator::ALL_PAGES);
2665 while (it.has_next()) {
2666 ASSERT_MAP_PAGE_INDEX(page_count);
2667
2668 Page* p = it.next();
2669 ASSERT(p->mc_page_index == page_count);
2670
2671 page_addresses_[page_count++] = p->address();
2672 }
2673 }
2674}
2675
2676
2677#ifdef DEBUG
2678void MapSpace::VerifyObject(HeapObject* object) {
2679 // The object should be a map or a free-list node.
2680 ASSERT(object->IsMap() || object->IsByteArray());
2681}
2682#endif
2683
2684
2685// -----------------------------------------------------------------------------
2686// GlobalPropertyCellSpace implementation
2687
2688#ifdef DEBUG
2689void CellSpace::VerifyObject(HeapObject* object) {
2690 // The object should be a global object property cell or a free-list node.
2691 ASSERT(object->IsJSGlobalPropertyCell() ||
Steve Block44f0eee2011-05-26 01:26:41 +01002692 object->map() == heap()->two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002693}
2694#endif
2695
2696
2697// -----------------------------------------------------------------------------
2698// LargeObjectIterator
2699
2700LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2701 current_ = space->first_chunk_;
2702 size_func_ = NULL;
2703}
2704
2705
2706LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2707 HeapObjectCallback size_func) {
2708 current_ = space->first_chunk_;
2709 size_func_ = size_func;
2710}
2711
2712
2713HeapObject* LargeObjectIterator::next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002714 if (current_ == NULL) return NULL;
2715
Steve Blocka7e24c12009-10-30 11:49:00 +00002716 HeapObject* object = current_->GetObject();
2717 current_ = current_->next();
2718 return object;
2719}
2720
2721
2722// -----------------------------------------------------------------------------
2723// LargeObjectChunk
2724
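// Allocate a chunk of raw memory large enough to hold an object of
// 'size_in_bytes' plus the page header.  Returns NULL if the OS allocation
// fails or comes back smaller than requested; on success the allocation is
// reported to the registered memory allocation callbacks and the embedded
// page is tied to the current isolate's heap.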
2725LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
Steve Blocka7e24c12009-10-30 11:49:00 +00002726 Executability executable) {
2727 size_t requested = ChunkSizeFor(size_in_bytes);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002728 size_t size;
Steve Block44f0eee2011-05-26 01:26:41 +01002729 Isolate* isolate = Isolate::Current();
2730 void* mem = isolate->memory_allocator()->AllocateRawMemory(
2731 requested, &size, executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002732 if (mem == NULL) return NULL;
Ben Murdochb0fe1622011-05-05 13:52:32 +01002733
2734 // The start of the chunk may be overlaid with a page, so we have to
2735 // make sure that the page flags fit in the size field.
2736 ASSERT((size & Page::kPageFlagMask) == 0);
2737
Steve Block44f0eee2011-05-26 01:26:41 +01002738 LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
Ben Murdochb0fe1622011-05-05 13:52:32 +01002739 if (size < requested) {
Steve Block44f0eee2011-05-26 01:26:41 +01002740 isolate->memory_allocator()->FreeRawMemory(
2741 mem, size, executable);
2742 LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
Steve Blocka7e24c12009-10-30 11:49:00 +00002743 return NULL;
2744 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002745
2746 ObjectSpace space = (executable == EXECUTABLE)
2747 ? kObjectSpaceCodeSpace
2748 : kObjectSpaceLoSpace;
Steve Block44f0eee2011-05-26 01:26:41 +01002749 isolate->memory_allocator()->PerformAllocationCallback(
Ben Murdochb0fe1622011-05-05 13:52:32 +01002750 space, kAllocationActionAllocate, size);
2751
2752 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2753 chunk->size_ = size;
Steve Block44f0eee2011-05-26 01:26:41 +01002754 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2755 page->heap_ = isolate->heap();
Ben Murdochb0fe1622011-05-05 13:52:32 +01002756 return chunk;
Steve Blocka7e24c12009-10-30 11:49:00 +00002757}
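
// Layout note (a sketch of what New() hands back): the raw block from the
// memory allocator starts with the LargeObjectChunk header (next_, size_),
// and the Page used for the object is placed at the first page boundary at
// or after that start, i.e. Page::FromAddress(RoundUp(address, kPageSize)).
// When the block comes back already page-aligned, the Page therefore
// overlays the chunk header and, as the comment inside New() notes, the page
// flag bits land in the size field, which is why the ASSERT above requires
// the size to leave the Page::kPageFlagMask bits clear.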
2758
2759
2760int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
Steve Blockd0582a62009-12-15 09:54:21 +00002761 int os_alignment = static_cast<int>(OS::AllocateAlignment());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002762 if (os_alignment < Page::kPageSize) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002763 size_in_bytes += (Page::kPageSize - os_alignment);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002764 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002765 return size_in_bytes + Page::kObjectStartOffset;
2766}
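
// Worked example with illustrative numbers (the real values come from Page
// and OS and may differ): if Page::kPageSize is 8K, OS::AllocateAlignment()
// is 4K and Page::kObjectStartOffset is 256, then a request for 100000 bytes
// asks the memory allocator for
//   100000 + (8192 - 4096) + 256 = 104352
// bytes, enough slack to round the chunk start up to a page boundary and
// still keep the whole object behind the page header.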
2767
2768// -----------------------------------------------------------------------------
2769// LargeObjectSpace
2770
Steve Block44f0eee2011-05-26 01:26:41 +01002771LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
2772 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
Steve Blocka7e24c12009-10-30 11:49:00 +00002773 first_chunk_(NULL),
2774 size_(0),
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002775 page_count_(0),
2776 objects_size_(0) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002777
2778
2779bool LargeObjectSpace::Setup() {
2780 first_chunk_ = NULL;
2781 size_ = 0;
2782 page_count_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002783 objects_size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002784 return true;
2785}
2786
2787
2788void LargeObjectSpace::TearDown() {
2789 while (first_chunk_ != NULL) {
2790 LargeObjectChunk* chunk = first_chunk_;
2791 first_chunk_ = first_chunk_->next();
Steve Block44f0eee2011-05-26 01:26:41 +01002792 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
Steve Block791712a2010-08-27 10:21:07 +01002793 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2794 Executability executable =
2795 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Iain Merrick9ac36c92010-09-13 15:29:50 +01002796 ObjectSpace space = kObjectSpaceLoSpace;
2797 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2798 size_t size = chunk->size();
Steve Block44f0eee2011-05-26 01:26:41 +01002799 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
2800 size,
2801 executable);
2802 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
Iain Merrick9ac36c92010-09-13 15:29:50 +01002803 space, kAllocationActionFree, size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002804 }
2805
2806 size_ = 0;
2807 page_count_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002808 objects_size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002809}
2810
2811
2812#ifdef ENABLE_HEAP_PROTECTION
2813
2814void LargeObjectSpace::Protect() {
2815 LargeObjectChunk* chunk = first_chunk_;
2816 while (chunk != NULL) {
Steve Block44f0eee2011-05-26 01:26:41 +01002817 heap()->isolate()->memory_allocator()->Protect(chunk->address(),
2818 chunk->size());
Steve Blocka7e24c12009-10-30 11:49:00 +00002819 chunk = chunk->next();
2820 }
2821}
2822
2823
2824void LargeObjectSpace::Unprotect() {
2825 LargeObjectChunk* chunk = first_chunk_;
2826 while (chunk != NULL) {
2827 bool is_code = chunk->GetObject()->IsCode();
Steve Block44f0eee2011-05-26 01:26:41 +01002828 heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
2829 chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002830 chunk = chunk->next();
2831 }
2832}
2833
2834#endif
2835
2836
John Reck59135872010-11-02 12:39:01 -07002837MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
2838 int object_size,
2839 Executability executable) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002840 ASSERT(0 < object_size && object_size <= requested_size);
2841
2842 // Check if we want to force a GC before growing the old space further.
2843 // If so, fail the allocation.
Steve Block44f0eee2011-05-26 01:26:41 +01002844 if (!heap()->always_allocate() &&
2845 heap()->OldGenerationAllocationLimitReached()) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002846 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002847 }
2848
Ben Murdochb0fe1622011-05-05 13:52:32 +01002849 LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002850 if (chunk == NULL) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002851 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002852 }
2853
Ben Murdochb0fe1622011-05-05 13:52:32 +01002854 size_ += static_cast<int>(chunk->size());
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002855 objects_size_ += requested_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002856 page_count_++;
2857 chunk->set_next(first_chunk_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002858 first_chunk_ = chunk;
2859
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002860 // Initialize page header.
Steve Blocka7e24c12009-10-30 11:49:00 +00002861 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2862 Address object_address = page->ObjectAreaStart();
Ben Murdochb0fe1622011-05-05 13:52:32 +01002863
Steve Blocka7e24c12009-10-30 11:49:00 +00002864 // Clear the low order bit of the second word in the page to flag it as a
2865 // large object page. If the chunk_size happened to be written there, its
2866 // low order bit should already be clear.
Steve Block6ded16b2010-05-10 14:33:55 +01002867 page->SetIsLargeObjectPage(true);
Steve Block791712a2010-08-27 10:21:07 +01002868 page->SetIsPageExecutable(executable);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002869 page->SetRegionMarks(Page::kAllRegionsCleanMarks);
Steve Blocka7e24c12009-10-30 11:49:00 +00002870 return HeapObject::FromAddress(object_address);
2871}
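
// Note on the header initialization above: a large object chunk carries a
// single Page whose object area holds exactly one object, so marking the
// page as a large-object page, recording its executability and clearing the
// region marks is the per-page state set up at allocation time (the heap_
// backlink was already installed in LargeObjectChunk::New).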
2872
2873
John Reck59135872010-11-02 12:39:01 -07002874MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002875 ASSERT(0 < size_in_bytes);
2876 return AllocateRawInternal(size_in_bytes,
2877 size_in_bytes,
2878 EXECUTABLE);
2879}
2880
2881
John Reck59135872010-11-02 12:39:01 -07002882MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002883 ASSERT(0 < size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002884 return AllocateRawInternal(size_in_bytes,
Steve Blocka7e24c12009-10-30 11:49:00 +00002885 size_in_bytes,
2886 NOT_EXECUTABLE);
2887}
2888
2889
John Reck59135872010-11-02 12:39:01 -07002890MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002891 ASSERT(0 < size_in_bytes);
2892 return AllocateRawInternal(size_in_bytes,
2893 size_in_bytes,
2894 NOT_EXECUTABLE);
2895}
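
// Caller-side sketch (a minimal illustration of the contract, not a quote of
// any particular call site; lo_space and size_in_bytes are illustrative
// names): a failed allocation comes back as a retry-after-GC failure tagged
// with this space's identity, so callers unpack the MaybeObject and
// propagate the failure when no object was produced.
//
//   Object* result;
//   { MaybeObject* maybe = lo_space->AllocateRaw(size_in_bytes);
//     // Propagate the failure so a GC can be triggered and the
//     // allocation retried at a higher level.
//     if (!maybe->ToObject(&result)) return maybe;
//   }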
2896
2897
2898// GC support
John Reck59135872010-11-02 12:39:01 -07002899MaybeObject* LargeObjectSpace::FindObject(Address a) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002900 for (LargeObjectChunk* chunk = first_chunk_;
2901 chunk != NULL;
2902 chunk = chunk->next()) {
2903 Address chunk_address = chunk->address();
2904 if (chunk_address <= a && a < chunk_address + chunk->size()) {
2905 return chunk->GetObject();
2906 }
2907 }
2908 return Failure::Exception();
2909}
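
// Usage note: FindObject() deliberately returns a Failure rather than NULL
// when the address lies in no chunk, which is what lets the debug check in
// Contains() below be written as !FindObject(address)->IsFailure(). The
// lookup itself is a linear walk of the chunk list.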
2910
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002911
2912LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
2913 // TODO(853): Change this implementation to only find executable
2914 // chunks and use some kind of hash-based approach to speed it up.
2915 for (LargeObjectChunk* chunk = first_chunk_;
2916 chunk != NULL;
2917 chunk = chunk->next()) {
2918 Address chunk_address = chunk->address();
2919 if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
2920 return chunk;
2921 }
2922 }
2923 return NULL;
2924}
2925
2926
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002927void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002928 LargeObjectIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002929 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002930 // We only have code, sequential strings, or fixed arrays in large
2931 // object space, and only fixed arrays can possibly contain pointers to
2932 // the young generation.
Steve Blocka7e24c12009-10-30 11:49:00 +00002933 if (object->IsFixedArray()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002934 Page* page = Page::FromAddress(object->address());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002935 uint32_t marks = page->GetRegionMarks();
2936 uint32_t newmarks = Page::kAllRegionsCleanMarks;
Steve Blocka7e24c12009-10-30 11:49:00 +00002937
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002938 if (marks != Page::kAllRegionsCleanMarks) {
2939 // For a large page, a single dirty mark corresponds to several
2940 // regions (region numbers wrap modulo 32), so we treat a large page as
2941 // a sequence of normal pages of size Page::kPageSize that share the
2942 // same dirty marks and iterate dirty regions on each of these pages.
2943 Address start = object->address();
2944 Address end = page->ObjectAreaEnd();
2945 Address object_end = start + object->Size();
2946
2947 // Iterate regions of the first normal page covering object.
2948 uint32_t first_region_number = page->GetRegionNumberForAddress(start);
2949 newmarks |=
Steve Block44f0eee2011-05-26 01:26:41 +01002950 heap()->IterateDirtyRegions(marks >> first_region_number,
2951 start,
2952 end,
2953 &Heap::IteratePointersInDirtyRegion,
2954 copy_object) << first_region_number;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002955
2956 start = end;
2957 end = start + Page::kPageSize;
2958 while (end <= object_end) {
2959 // Iterate next 32 regions.
2960 newmarks |=
Steve Block44f0eee2011-05-26 01:26:41 +01002961 heap()->IterateDirtyRegions(marks,
2962 start,
2963 end,
2964 &Heap::IteratePointersInDirtyRegion,
2965 copy_object);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002966 start = end;
2967 end = start + Page::kPageSize;
2968 }
2969
2970 if (start != object_end) {
2971 // Iterate the last piece of an object which is less than
2972 // Page::kPageSize.
2973 newmarks |=
Steve Block44f0eee2011-05-26 01:26:41 +01002974 heap()->IterateDirtyRegions(marks,
2975 start,
2976 object_end,
2977 &Heap::IteratePointersInDirtyRegion,
2978 copy_object);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002979 }
2980
2981 page->SetRegionMarks(newmarks);
Steve Blocka7e24c12009-10-30 11:49:00 +00002982 }
2983 }
2984 }
2985}
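
// Worked example (numbers are illustrative): suppose the object begins in
// region 5 of its first covering page, so first_region_number == 5. The
// dirty bits relevant to that first partial page are taken from marks >> 5,
// the clean/dirty summary returned by Heap::IterateDirtyRegions is shifted
// back with << 5 before being OR-ed into newmarks, and every subsequent
// Page::kPageSize window of the object simply reuses the whole marks word,
// since the one large-page marks word stands in for all of those windows.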
2986
2987
2988void LargeObjectSpace::FreeUnmarkedObjects() {
2989 LargeObjectChunk* previous = NULL;
2990 LargeObjectChunk* current = first_chunk_;
2991 while (current != NULL) {
2992 HeapObject* object = current->GetObject();
2993 if (object->IsMarked()) {
2994 object->ClearMark();
Steve Block44f0eee2011-05-26 01:26:41 +01002995 heap()->mark_compact_collector()->tracer()->decrement_marked_count();
Steve Blocka7e24c12009-10-30 11:49:00 +00002996 previous = current;
2997 current = current->next();
2998 } else {
Steve Block791712a2010-08-27 10:21:07 +01002999 Page* page = Page::FromAddress(RoundUp(current->address(),
3000 Page::kPageSize));
3001 Executability executable =
3002 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003003 Address chunk_address = current->address();
3004 size_t chunk_size = current->size();
3005
3006 // Cut the chunk out from the chunk list.
3007 current = current->next();
3008 if (previous == NULL) {
3009 first_chunk_ = current;
3010 } else {
3011 previous->set_next(current);
3012 }
3013
3014 // Free the chunk.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003015 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
3016 object, heap()->isolate());
Steve Block1e0659c2011-05-24 12:43:12 +01003017 LiveObjectList::ProcessNonLive(object);
3018
Steve Blockd0582a62009-12-15 09:54:21 +00003019 size_ -= static_cast<int>(chunk_size);
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003020 objects_size_ -= object->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00003021 page_count_--;
Iain Merrick9ac36c92010-09-13 15:29:50 +01003022 ObjectSpace space = kObjectSpaceLoSpace;
3023 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
Steve Block44f0eee2011-05-26 01:26:41 +01003024 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
3025 chunk_size,
3026 executable);
3027 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
3028 space, kAllocationActionFree, chunk_size);
3029 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
Steve Blocka7e24c12009-10-30 11:49:00 +00003030 }
3031 }
3032}
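
// Bookkeeping note: the decrements above (size_, objects_size_, page_count_)
// mirror the increments performed in AllocateRawInternal, so after a sweep
// the space statistics agree with whatever survives on the chunk list, and
// the allocation callback is told the size of the freed chunk under the same
// space/executable classification that was reported when it was created.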
3033
3034
3035bool LargeObjectSpace::Contains(HeapObject* object) {
3036 Address address = object->address();
Steve Block44f0eee2011-05-26 01:26:41 +01003037 if (heap()->new_space()->Contains(address)) {
Steve Block6ded16b2010-05-10 14:33:55 +01003038 return false;
3039 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003040 Page* page = Page::FromAddress(address);
3041
3042 SLOW_ASSERT(!page->IsLargeObjectPage()
3043 || !FindObject(address)->IsFailure());
3044
3045 return page->IsLargeObjectPage();
3046}
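
// Design note: Contains() is effectively O(1): once new-space addresses are
// excluded it only consults the IsLargeObjectPage flag of the covering page,
// and the SLOW_ASSERT cross-checks that flag against the linear FindObject()
// walk in debug builds.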
3047
3048
3049#ifdef DEBUG
3050// We do not assume that the large object iterator works, because it depends
3051// on the invariants we are checking during verification.
3052void LargeObjectSpace::Verify() {
3053 for (LargeObjectChunk* chunk = first_chunk_;
3054 chunk != NULL;
3055 chunk = chunk->next()) {
3056 // Each chunk contains an object that starts at the large object page's
3057 // object area start.
3058 HeapObject* object = chunk->GetObject();
3059 Page* page = Page::FromAddress(object->address());
3060 ASSERT(object->address() == page->ObjectAreaStart());
3061
3062 // The first word should be a map, and we expect all map pointers to be
3063 // in map space.
3064 Map* map = object->map();
3065 ASSERT(map->IsMap());
Steve Block44f0eee2011-05-26 01:26:41 +01003066 ASSERT(heap()->map_space()->Contains(map));
Steve Blocka7e24c12009-10-30 11:49:00 +00003067
3068 // We have only code, sequential strings, external strings
3069 // (sequential strings that have been morphed into external
3070 // strings), fixed arrays, and byte arrays in large object space.
3071 ASSERT(object->IsCode() || object->IsSeqString() ||
3072 object->IsExternalString() || object->IsFixedArray() ||
3073 object->IsByteArray());
3074
3075 // The object itself should look OK.
3076 object->Verify();
3077
3078 // Byte arrays and strings don't have interior pointers.
3079 if (object->IsCode()) {
3080 VerifyPointersVisitor code_visitor;
3081 object->IterateBody(map->instance_type(),
3082 object->Size(),
3083 &code_visitor);
3084 } else if (object->IsFixedArray()) {
3085 // We loop over fixed arrays ourselves, rather than using the visitor,
3086 // because the visitor doesn't support the start/offset iteration
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003087 // needed for IsRegionDirty.
Steve Blocka7e24c12009-10-30 11:49:00 +00003088 FixedArray* array = FixedArray::cast(object);
3089 for (int j = 0; j < array->length(); j++) {
3090 Object* element = array->get(j);
3091 if (element->IsHeapObject()) {
3092 HeapObject* element_object = HeapObject::cast(element);
Steve Block44f0eee2011-05-26 01:26:41 +01003093 ASSERT(heap()->Contains(element_object));
Steve Blocka7e24c12009-10-30 11:49:00 +00003094 ASSERT(element_object->map()->IsMap());
Steve Block44f0eee2011-05-26 01:26:41 +01003095 if (heap()->InNewSpace(element_object)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003096 Address array_addr = object->address();
3097 Address element_addr = array_addr + FixedArray::kHeaderSize +
3098 j * kPointerSize;
3099
3100 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00003101 }
3102 }
3103 }
3104 }
3105 }
3106}
3107
3108
3109void LargeObjectSpace::Print() {
3110 LargeObjectIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00003111 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
3112 obj->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003113 }
3114}
3115
3116
3117void LargeObjectSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01003118 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003119 int num_objects = 0;
3120 ClearHistograms();
3121 LargeObjectIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00003122 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003123 num_objects++;
Leon Clarked91b9f72010-01-27 17:25:45 +00003124 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00003125 }
3126
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003127 PrintF(" number of objects %d, "
3128 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003129 if (num_objects > 0) ReportHistogram(false);
3130}
3131
3132
3133void LargeObjectSpace::CollectCodeStatistics() {
Steve Block44f0eee2011-05-26 01:26:41 +01003134 Isolate* isolate = heap()->isolate();
Steve Blocka7e24c12009-10-30 11:49:00 +00003135 LargeObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00003136 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003137 if (obj->IsCode()) {
3138 Code* code = Code::cast(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01003139 isolate->code_kind_statistics()[code->kind()] += code->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00003140 }
3141 }
3142}
Steve Blocka7e24c12009-10-30 11:49:00 +00003143#endif // DEBUG
3144
3145} } // namespace v8::internal