// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())

intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
}

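// Shared helper for the constructors above: records the iteration bounds and
// the optional size function, then clamps the current page limit to either
// the end address (if we are already on the end page) or that page's
// allocation top.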
void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}

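// Advances the iteration to the next page and returns the first object on
// it, or NULL once the end address has been reached.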
HeapObject* HeapObjectIterator::FromNextPage() {
  if (cur_addr_ == end_addr_) return NULL;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  if (cur_addr_ == end_addr_) return NULL;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return FromCurrentPage();
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// -----------------------------------------------------------------------------
// PageIterator

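// The stop page depends on the iteration mode: PAGES_IN_USE stops at the page
// holding the current allocation top, PAGES_USED_BY_MC at the mark-compact
// relocation top, and ALL_PAGES at the cached last page of the space.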
PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}


// -----------------------------------------------------------------------------
// CodeRange

List<CodeRange::FreeBlock> CodeRange::free_list_(0);
List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
int CodeRange::current_allocation_block_index_ = 0;
VirtualMemory* CodeRange::code_range_ = NULL;


bool CodeRange::Setup(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(NewEvent("CodeRange", code_range_->address(), requested));
  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}

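// Moves current_allocation_block_index_ to a block that can hold 'requested'
// bytes.  First scans forward through allocation_list_; if nothing fits, the
// free list is merged back into the allocation list (coalescing adjacent
// blocks) and the scan is retried.  Fails with an OOM if the code range is
// full or too fragmented.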
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}



void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//
intptr_t MemoryAllocator::capacity_ = 0;
intptr_t MemoryAllocator::capacity_executable_ = 0;
intptr_t MemoryAllocator::size_ = 0;
intptr_t MemoryAllocator::size_executable_ = 0;

List<MemoryAllocator::MemoryAllocationCallbackRegistration>
    MemoryAllocator::memory_allocation_callbacks_;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;

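// free_chunk_ids_ is treated as a stack of available chunk ids: Push recycles
// the id of a deleted chunk and Pop hands out an id for a new one.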
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ =
      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  size_executable_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


bool MemoryAllocator::SafeIsInAPageChunk(Address addr) {
  return InInitialChunk(addr) || InAllocatedChunks(addr);
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  FreeChunkTables(&chunk_table_[0],
                  kChunkTableTopLevelEntries,
                  kChunkTableLevels);

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  capacity_executable_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void MemoryAllocator::FreeChunkTables(uintptr_t* array, int len, int level) {
  for (int i = 0; i < len; i++) {
    if (array[i] != kUnusedChunkTableEntry) {
      uintptr_t* subarray = reinterpret_cast<uintptr_t*>(array[i]);
      if (level > 1) {
        array[i] = kUnusedChunkTableEntry;
        FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1);
      } else {
        array[i] = kUnusedChunkTableEntry;
      }
      delete[] subarray;
    }
  }
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
    return NULL;
  }

  void* mem;
  if (executable == EXECUTABLE) {
    // Check executable memory limit.
    if (size_executable_ + requested >
        static_cast<size_t>(capacity_executable_)) {
      LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }
    // Allocate executable memory either from code range or from the
    // OS.
    if (CodeRange::exists()) {
      mem = CodeRange::AllocateRawMemory(requested, allocated);
    } else {
      mem = OS::Allocate(requested, allocated, true);
    }
    // Update executable memory size.
    size_executable_ += static_cast<int>(*allocated);
  } else {
    mem = OS::Allocate(requested, allocated, false);
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;

#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem,
                                    size_t length,
                                    Executability executable) {
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
  if (CodeRange::contains(static_cast<Address>(mem))) {
    CodeRange::FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);

  ASSERT(size_ >= 0);
  ASSERT(size_executable_ >= 0);
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}

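// Illustrative example (the numbers are hypothetical, not the real
// constants): with a page size of 4, PagesInChunk(6, 14) covers the
// page-aligned range [8, 20) and returns 3.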
static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}


Page* MemoryAllocator::AllocatePages(int requested_pages,
                                     int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  // We may 'lose' a page due to alignment.
  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size, owner->executable());
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);

  AddToAllocatedChunks(static_cast<Address>(chunk), chunk_size);

  return new_pages;
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  Counters::memory_allocated.Decrement(static_cast<int>(size));
  return true;
}

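// Fills a block with kZapValue one pointer-sized word at a time.  Used from
// the DEBUG-only paths above so that reads from freed or uncommitted memory
// are easier to spot.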
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->InvalidateWatermark(true);
    p->SetIsLargeObjectPage(false);
    p->SetAllocationWatermark(p->ObjectAreaStart());
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'prev'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::FreeAllPages(PagedSpace* space) {
  for (int i = 0, length = chunks_.length(); i < length; i++) {
    if (chunks_[i].owner() == space) {
      DeleteChunk(i);
    }
  }
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
  } else {
    RemoveFromAllocatedChunks(c.address(), c.size());
    LOG(DeleteEvent("PagedChunk", c.address()));
    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
    size_t size = c.size();
    FreeRawMemory(c.address(), size, c.executable());
    PerformAllocationCallback(space, kAllocationActionFree, size);
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
                                                 Page** first_page,
                                                 Page** last_page,
                                                 Page** last_page_in_use) {
  Page* first = NULL;
  Page* last = NULL;

  for (int i = 0, length = chunks_.length(); i < length; i++) {
    ChunkInfo& chunk = chunks_[i];

    if (chunk.owner() == space) {
      if (first == NULL) {
        Address low = RoundUp(chunk.address(), Page::kPageSize);
        first = Page::FromAddress(low);
      }
      last = RelinkPagesInChunk(i,
                                chunk.address(),
                                chunk.size(),
                                last,
                                last_page_in_use);
    }
  }

  if (first_page != NULL) {
    *first_page = first;
  }

  if (last_page != NULL) {
    *last_page = last;
  }
}


Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    p->InvalidateWatermark(true);
    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}

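// The chunk table is a small multi-level table keyed on the high bits of an
// address and is used to answer InAllocatedChunks queries.  Each chunk is
// registered under both its first and its last address, so a chunk that
// straddles a fine-grained table boundary can be found from either side.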
void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) {
  ASSERT(size == kChunkSize);
  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
  AddChunkUsingAddress(int_address, int_address);
  AddChunkUsingAddress(int_address, int_address + size - 1);
}


void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start,
                                           uintptr_t chunk_index_base) {
  uintptr_t* fine_grained = AllocatedChunksFinder(
      chunk_table_,
      chunk_index_base,
      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
      kCreateTablesAsNeeded);
  int index = FineGrainedIndexForAddress(chunk_index_base);
  if (fine_grained[index] != kUnusedChunkTableEntry) index++;
  ASSERT(fine_grained[index] == kUnusedChunkTableEntry);
  fine_grained[index] = chunk_start;
}


void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) {
  ASSERT(size == kChunkSize);
  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
  RemoveChunkFoundUsingAddress(int_address, int_address);
  RemoveChunkFoundUsingAddress(int_address, int_address + size - 1);
}


void MemoryAllocator::RemoveChunkFoundUsingAddress(
    uintptr_t chunk_start,
    uintptr_t chunk_index_base) {
  uintptr_t* fine_grained = AllocatedChunksFinder(
      chunk_table_,
      chunk_index_base,
      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
      kDontCreateTables);
  // Can't remove an entry that's not there.
  ASSERT(fine_grained != kUnusedChunkTableEntry);
  int index = FineGrainedIndexForAddress(chunk_index_base);
  ASSERT(fine_grained[index] != kUnusedChunkTableEntry);
  if (fine_grained[index] != chunk_start) {
    index++;
    ASSERT(fine_grained[index] == chunk_start);
    fine_grained[index] = kUnusedChunkTableEntry;
  } else {
    // If only one of the entries is used it must be the first, since
    // InAllocatedChunks relies on that.  Move things around so that this is
    // the case.
    fine_grained[index] = fine_grained[index + 1];
    fine_grained[index + 1] = kUnusedChunkTableEntry;
  }
}


bool MemoryAllocator::InAllocatedChunks(Address addr) {
  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
  uintptr_t* fine_grained = AllocatedChunksFinder(
      chunk_table_,
      int_address,
      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
      kDontCreateTables);
  if (fine_grained == NULL) return false;
  int index = FineGrainedIndexForAddress(int_address);
  if (fine_grained[index] == kUnusedChunkTableEntry) return false;
  uintptr_t entry = fine_grained[index];
  if (entry <= int_address && entry + kChunkSize > int_address) return true;
  index++;
  if (fine_grained[index] == kUnusedChunkTableEntry) return false;
  entry = fine_grained[index];
  if (entry <= int_address && entry + kChunkSize > int_address) return true;
  return false;
}

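// Walks the chunk table one level at a time, consuming kChunkTableBitsPerLevel
// address bits per level.  Returns the fine-grained leaf table for 'address',
// creating missing intermediate tables when called with kCreateTablesAsNeeded;
// with kDontCreateTables a missing table makes it return NULL.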
uintptr_t* MemoryAllocator::AllocatedChunksFinder(
    uintptr_t* table,
    uintptr_t address,
    int bit_position,
    CreateTables create_as_needed) {
  if (bit_position == kChunkSizeLog2) {
    return table;
  }
  ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel);
  int index =
      ((address >> bit_position) &
       ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1));
  uintptr_t more_fine_grained_address =
      address & ((V8_INTPTR_C(1) << bit_position) - 1);
  ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) ||
         (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel));
  uintptr_t* more_fine_grained_table =
      reinterpret_cast<uintptr_t*>(table[index]);
  if (more_fine_grained_table == kUnusedChunkTableEntry) {
    if (create_as_needed == kDontCreateTables) return NULL;
    int words_needed = 1 << kChunkTableBitsPerLevel;
    if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) {
      words_needed =
          (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry;
    }
    more_fine_grained_table = new uintptr_t[words_needed];
    for (int i = 0; i < words_needed; i++) {
      more_fine_grained_table[i] = kUnusedChunkTableEntry;
    }
    table[index] = reinterpret_cast<uintptr_t>(more_fine_grained_table);
  }
  return AllocatedChunksFinder(
      more_fine_grained_table,
      more_fine_grained_address,
      bit_position - kChunkTableBitsPerLevel,
      create_as_needed);
}


uintptr_t MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries];


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages =
        Min(MemoryAllocator::kPagesPerChunk,
            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially clear region marks in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  MemoryAllocator::FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::MarkAllPagesClean() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
  }
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page in the page, MCSpaceOffsetForAddress considers it is in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to previous page
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
  current_page->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}

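// Grows the space by up to one chunk of pages and links the new pages in
// after last_page.  Returns false if the space is already at max capacity or
// if fewer than kPagesPerChunk pages could be added without exceeding it.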
bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages =
      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
  // We don't want to have to handle small chunks near the end so if there are
  // not kPagesPerChunk pages available without exceeding the max capacity then
  // act as if memory has run out.
  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear region marks of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  if (!page_list_is_chunk_ordered_) {
    // We can't shrink the space if its pages are not chunk-ordered
    // (see comment for class MemoryAllocator for definition).
    return;
  }

  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap, and
        // page regions covering intergenerational references should be
        // marked dirty.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif

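// Commit and Uncommit toggle whether the semispace's reserved address range
// is backed by committed memory, keeping the committed_ flag in sync.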
1520bool SemiSpace::Commit() {
1521 ASSERT(!is_committed());
1522 if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
1523 return false;
1524 }
1525 committed_ = true;
1526 return true;
1527}
1528
1529
1530bool SemiSpace::Uncommit() {
1531 ASSERT(is_committed());
1532 if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
1533 return false;
1534 }
1535 committed_ = false;
1536 return true;
1537}
1538
1539
1540// -----------------------------------------------------------------------------
1541// SemiSpace implementation
1542
1543bool SemiSpace::Setup(Address start,
1544 int initial_capacity,
1545 int maximum_capacity) {
1546 // Creates a space in the young generation. The constructor does not
1547 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1548 // memory of size 'capacity' when set up, and does not grow or shrink
1549 // otherwise. In the mark-compact collector, the memory region of the from
1550 // space is used as the marking stack. It requires contiguous memory
1551 // addresses.
1552 initial_capacity_ = initial_capacity;
1553 capacity_ = initial_capacity;
1554 maximum_capacity_ = maximum_capacity;
1555 committed_ = false;
1556
1557 start_ = start;
1558 address_mask_ = ~(maximum_capacity - 1);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001559 object_mask_ = address_mask_ | kHeapObjectTagMask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001560 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1561 age_mark_ = start_;
1562
1563 return Commit();
1564}
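
// The masks computed in Setup() give a cheap containment test (this relies
// on maximum_capacity being a power of two): a tagged heap object pointer o
// belongs to this semispace exactly when
//   (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_
// which is the form used by the Contains() helpers declared in spaces.h.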
1565
1566
1567void SemiSpace::TearDown() {
1568 start_ = NULL;
1569 capacity_ = 0;
1570}
1571
1572
1573bool SemiSpace::Grow() {
1574 // Double the semispace size but only up to maximum capacity.
1575 int maximum_extra = maximum_capacity_ - capacity_;
Steve Blockd0582a62009-12-15 09:54:21 +00001576 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
Steve Blocka7e24c12009-10-30 11:49:00 +00001577 maximum_extra);
1578 if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
1579 return false;
1580 }
1581 capacity_ += extra;
1582 return true;
1583}
1584
1585
1586bool SemiSpace::GrowTo(int new_capacity) {
1587 ASSERT(new_capacity <= maximum_capacity_);
1588 ASSERT(new_capacity > capacity_);
1589 size_t delta = new_capacity - capacity_;
1590 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1591 if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
1592 return false;
1593 }
1594 capacity_ = new_capacity;
1595 return true;
1596}
1597
1598
1599bool SemiSpace::ShrinkTo(int new_capacity) {
1600 ASSERT(new_capacity >= initial_capacity_);
1601 ASSERT(new_capacity < capacity_);
1602 size_t delta = capacity_ - new_capacity;
1603 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1604 if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
1605 return false;
1606 }
1607 capacity_ = new_capacity;
1608 return true;
1609}
1610
1611
1612#ifdef DEBUG
1613void SemiSpace::Print() { }
1614
1615
1616void SemiSpace::Verify() { }
1617#endif
1618
1619
1620// -----------------------------------------------------------------------------
1621// SemiSpaceIterator implementation.
1622SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1623 Initialize(space, space->bottom(), space->top(), NULL);
1624}
1625
1626
1627SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1628 HeapObjectCallback size_func) {
1629 Initialize(space, space->bottom(), space->top(), size_func);
1630}
1631
1632
1633SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1634 Initialize(space, start, space->top(), NULL);
1635}
1636
1637
1638void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
1639 Address end,
1640 HeapObjectCallback size_func) {
1641 ASSERT(space->ToSpaceContains(start));
1642 ASSERT(space->ToSpaceLow() <= end
1643 && end <= space->ToSpaceHigh());
1644 space_ = &space->to_space_;
1645 current_ = start;
1646 limit_ = end;
1647 size_func_ = size_func;
1648}
1649
1650
1651#ifdef DEBUG
1652// A static array of histogram info for each type.
1653static HistogramInfo heap_histograms[LAST_TYPE+1];
1654static JSObject::SpillInformation js_spill_information;
1655
1656// heap_histograms is shared; always clear it before using it.
1657static void ClearHistograms() {
1658 // We reset the name each time, though it hasn't changed.
1659#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
1660 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1661#undef DEF_TYPE_NAME
1662
1663#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
1664 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1665#undef CLEAR_HISTOGRAM
1666
1667 js_spill_information.Clear();
1668}
1669
1670
1671static int code_kind_statistics[Code::NUMBER_OF_KINDS];
1672
1673
1674static void ClearCodeKindStatistics() {
1675 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1676 code_kind_statistics[i] = 0;
1677 }
1678}
1679
1680
1681static void ReportCodeKindStatistics() {
Steve Block6ded16b2010-05-10 14:33:55 +01001682 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
Steve Blocka7e24c12009-10-30 11:49:00 +00001683
1684#define CASE(name) \
1685 case Code::name: table[Code::name] = #name; \
1686 break
1687
1688 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1689 switch (static_cast<Code::Kind>(i)) {
1690 CASE(FUNCTION);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001691 CASE(OPTIMIZED_FUNCTION);
Steve Blocka7e24c12009-10-30 11:49:00 +00001692 CASE(STUB);
1693 CASE(BUILTIN);
1694 CASE(LOAD_IC);
1695 CASE(KEYED_LOAD_IC);
1696 CASE(STORE_IC);
1697 CASE(KEYED_STORE_IC);
1698 CASE(CALL_IC);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001699 CASE(KEYED_CALL_IC);
Steve Block6ded16b2010-05-10 14:33:55 +01001700 CASE(BINARY_OP_IC);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001701 CASE(TYPE_RECORDING_BINARY_OP_IC);
1702 CASE(COMPARE_IC);
Steve Blocka7e24c12009-10-30 11:49:00 +00001703 }
1704 }
1705
1706#undef CASE
1707
1708 PrintF("\n Code kind histograms: \n");
1709 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1710 if (code_kind_statistics[i] > 0) {
1711 PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
1712 }
1713 }
1714 PrintF("\n");
1715}
1716
1717
1718static int CollectHistogramInfo(HeapObject* obj) {
1719 InstanceType type = obj->map()->instance_type();
1720 ASSERT(0 <= type && type <= LAST_TYPE);
1721 ASSERT(heap_histograms[type].name() != NULL);
1722 heap_histograms[type].increment_number(1);
1723 heap_histograms[type].increment_bytes(obj->Size());
1724
1725 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1726 JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
1727 }
1728
1729 return obj->Size();
1730}
1731
1732
1733static void ReportHistogram(bool print_spill) {
1734 PrintF("\n Object Histogram:\n");
1735 for (int i = 0; i <= LAST_TYPE; i++) {
1736 if (heap_histograms[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001737 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001738 heap_histograms[i].name(),
1739 heap_histograms[i].number(),
1740 heap_histograms[i].bytes());
1741 }
1742 }
1743 PrintF("\n");
1744
1745 // Summarize string types.
1746 int string_number = 0;
1747 int string_bytes = 0;
1748#define INCREMENT(type, size, name, camel_name) \
1749 string_number += heap_histograms[type].number(); \
1750 string_bytes += heap_histograms[type].bytes();
1751 STRING_TYPE_LIST(INCREMENT)
1752#undef INCREMENT
1753 if (string_number > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001754 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
Steve Blocka7e24c12009-10-30 11:49:00 +00001755 string_bytes);
1756 }
1757
1758 if (FLAG_collect_heap_spill_statistics && print_spill) {
1759 js_spill_information.Print();
1760 }
1761}
1762#endif // DEBUG
1763
1764
1765// Support for statistics gathering for --heap-stats and --log-gc.
1766#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1767void NewSpace::ClearHistograms() {
1768 for (int i = 0; i <= LAST_TYPE; i++) {
1769 allocated_histogram_[i].clear();
1770 promoted_histogram_[i].clear();
1771 }
1772}
1773
1774// Because the copying collector does not touch garbage objects, we iterate
1775// the new space before a collection to get a histogram of allocated objects.
1776// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1777// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1778// flag is set.
1779void NewSpace::CollectStatistics() {
1780 ClearHistograms();
1781 SemiSpaceIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00001782 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1783 RecordAllocation(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001784}
1785
1786
1787#ifdef ENABLE_LOGGING_AND_PROFILING
1788static void DoReportStatistics(HistogramInfo* info, const char* description) {
1789 LOG(HeapSampleBeginEvent("NewSpace", description));
1790 // Lump all the string types together.
1791 int string_number = 0;
1792 int string_bytes = 0;
1793#define INCREMENT(type, size, name, camel_name) \
1794 string_number += info[type].number(); \
1795 string_bytes += info[type].bytes();
1796 STRING_TYPE_LIST(INCREMENT)
1797#undef INCREMENT
1798 if (string_number > 0) {
1799 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1800 }
1801
1802 // Then do the other types.
1803 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1804 if (info[i].number() > 0) {
1805 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
1806 info[i].bytes()));
1807 }
1808 }
1809 LOG(HeapSampleEndEvent("NewSpace", description));
1810}
1811#endif // ENABLE_LOGGING_AND_PROFILING
1812
1813
1814void NewSpace::ReportStatistics() {
1815#ifdef DEBUG
1816 if (FLAG_heap_stats) {
1817 float pct = static_cast<float>(Available()) / Capacity();
Ben Murdochf87a2032010-10-22 12:50:53 +01001818 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1819 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001820 Capacity(), Available(), static_cast<int>(pct*100));
1821 PrintF("\n Object Histogram:\n");
1822 for (int i = 0; i <= LAST_TYPE; i++) {
1823 if (allocated_histogram_[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001824 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001825 allocated_histogram_[i].name(),
1826 allocated_histogram_[i].number(),
1827 allocated_histogram_[i].bytes());
1828 }
1829 }
1830 PrintF("\n");
1831 }
1832#endif // DEBUG
1833
1834#ifdef ENABLE_LOGGING_AND_PROFILING
1835 if (FLAG_log_gc) {
1836 DoReportStatistics(allocated_histogram_, "allocated");
1837 DoReportStatistics(promoted_histogram_, "promoted");
1838 }
1839#endif // ENABLE_LOGGING_AND_PROFILING
1840}
1841
1842
1843void NewSpace::RecordAllocation(HeapObject* obj) {
1844 InstanceType type = obj->map()->instance_type();
1845 ASSERT(0 <= type && type <= LAST_TYPE);
1846 allocated_histogram_[type].increment_number(1);
1847 allocated_histogram_[type].increment_bytes(obj->Size());
1848}
1849
1850
1851void NewSpace::RecordPromotion(HeapObject* obj) {
1852 InstanceType type = obj->map()->instance_type();
1853 ASSERT(0 <= type && type <= LAST_TYPE);
1854 promoted_histogram_[type].increment_number(1);
1855 promoted_histogram_[type].increment_bytes(obj->Size());
1856}
1857#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1858
1859
1860// -----------------------------------------------------------------------------
1861// Free lists for old object spaces implementation
1862
1863void FreeListNode::set_size(int size_in_bytes) {
1864 ASSERT(size_in_bytes > 0);
1865 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1866
1867 // We write a map and possibly size information to the block. If the block
1868 // is big enough to be a ByteArray with at least one extra word (the next
1869 // pointer), we set its map to be the byte array map and its size to an
1870 // appropriate array length for the desired size from HeapObject::Size().
1871 // If the block is too small (e.g., one or two words) to hold both a size
1872 // field and a next pointer, we give it a filler map that gives it the
1873 // correct size.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001874 if (size_in_bytes > ByteArray::kHeaderSize) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001875 set_map(Heap::raw_unchecked_byte_array_map());
Steve Blockd0582a62009-12-15 09:54:21 +00001876 // Can't use ByteArray::cast because it fails during deserialization.
1877 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1878 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00001879 } else if (size_in_bytes == kPointerSize) {
1880 set_map(Heap::raw_unchecked_one_pointer_filler_map());
1881 } else if (size_in_bytes == 2 * kPointerSize) {
1882 set_map(Heap::raw_unchecked_two_pointer_filler_map());
1883 } else {
1884 UNREACHABLE();
1885 }
Steve Blockd0582a62009-12-15 09:54:21 +00001886 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1887 // deserialization because the byte array map is not done yet.
Steve Blocka7e24c12009-10-30 11:49:00 +00001888}
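
// Worked example for set_size(), assuming a 32-bit build (kPointerSize == 4)
// where ByteArray::kHeaderSize is 8:
//   - a 4-byte block gets the one-pointer filler map,
//   - an 8-byte block gets the two-pointer filler map,
//   - a 24-byte block gets the byte array map and a length chosen via
//     ByteArray::LengthFor(24) so that HeapObject::Size() reports 24 bytes.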
1889
1890
1891Address FreeListNode::next() {
Steve Block3ce2e202009-11-05 08:53:23 +00001892 ASSERT(IsFreeListNode(this));
Steve Blocka7e24c12009-10-30 11:49:00 +00001893 if (map() == Heap::raw_unchecked_byte_array_map()) {
1894 ASSERT(Size() >= kNextOffset + kPointerSize);
1895 return Memory::Address_at(address() + kNextOffset);
1896 } else {
1897 return Memory::Address_at(address() + kPointerSize);
1898 }
1899}
1900
1901
1902void FreeListNode::set_next(Address next) {
Steve Block3ce2e202009-11-05 08:53:23 +00001903 ASSERT(IsFreeListNode(this));
Steve Blocka7e24c12009-10-30 11:49:00 +00001904 if (map() == Heap::raw_unchecked_byte_array_map()) {
1905 ASSERT(Size() >= kNextOffset + kPointerSize);
1906 Memory::Address_at(address() + kNextOffset) = next;
1907 } else {
1908 Memory::Address_at(address() + kPointerSize) = next;
1909 }
1910}
1911
1912
1913OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
1914 Reset();
1915}
1916
1917
1918void OldSpaceFreeList::Reset() {
1919 available_ = 0;
1920 for (int i = 0; i < kFreeListsLength; i++) {
1921 free_[i].head_node_ = NULL;
1922 }
1923 needs_rebuild_ = false;
1924 finger_ = kHead;
1925 free_[kHead].next_size_ = kEnd;
1926}
1927
1928
1929void OldSpaceFreeList::RebuildSizeList() {
1930 ASSERT(needs_rebuild_);
1931 int cur = kHead;
1932 for (int i = cur + 1; i < kFreeListsLength; i++) {
1933 if (free_[i].head_node_ != NULL) {
1934 free_[cur].next_size_ = i;
1935 cur = i;
1936 }
1937 }
1938 free_[cur].next_size_ = kEnd;
1939 needs_rebuild_ = false;
1940}
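
// Example: if only free_[3] and free_[10] currently have nodes, the rebuilt
// size list is kHead -> 3 -> 10 -> kEnd, letting FindSize() skip the empty
// buckets in between when searching for a best fit.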
1941
1942
1943int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1944#ifdef DEBUG
Leon Clarke4515c472010-02-03 11:58:03 +00001945 MemoryAllocator::ZapBlock(start, size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001946#endif
1947 FreeListNode* node = FreeListNode::FromAddress(start);
1948 node->set_size(size_in_bytes);
1949
1950 // We don't use the freelists in compacting mode. This makes it more like a
1951 // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1952 // collector.
1953 if (FLAG_always_compact) {
1954 return size_in_bytes;
1955 }
1956
1957 // Early return to drop too-small blocks on the floor (one or two word
1958 // blocks cannot hold a map pointer, a size field, and a pointer to the
1959 // next block in the free list).
1960 if (size_in_bytes < kMinBlockSize) {
1961 return size_in_bytes;
1962 }
1963
1964 // Insert other blocks at the head of an exact free list.
1965 int index = size_in_bytes >> kPointerSizeLog2;
1966 node->set_next(free_[index].head_node_);
1967 free_[index].head_node_ = node->address();
1968 available_ += size_in_bytes;
1969 needs_rebuild_ = true;
1970 return 0;
1971}
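
// Worked example for Free(), assuming a 32-bit build (kPointerSize == 4):
// freeing a 40-byte block writes a byte array header into it and links it at
// the head of free_[10], since 40 >> kPointerSizeLog2 == 10. Blocks smaller
// than kMinBlockSize never reach a list; they are returned as wasted bytes.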
1972
1973
John Reck59135872010-11-02 12:39:01 -07001974MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001975 ASSERT(0 < size_in_bytes);
1976 ASSERT(size_in_bytes <= kMaxBlockSize);
1977 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1978
1979 if (needs_rebuild_) RebuildSizeList();
1980 int index = size_in_bytes >> kPointerSizeLog2;
1981 // Check for a perfect fit.
1982 if (free_[index].head_node_ != NULL) {
1983 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1984 // If this was the last block of its size, remove the size.
1985 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
1986 available_ -= size_in_bytes;
1987 *wasted_bytes = 0;
1988 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1989 return node;
1990 }
1991 // Search the size list for the best fit.
1992 int prev = finger_ < index ? finger_ : kHead;
1993 int cur = FindSize(index, &prev);
1994 ASSERT(index < cur);
1995 if (cur == kEnd) {
1996 // No large enough size in list.
1997 *wasted_bytes = 0;
Ben Murdochf87a2032010-10-22 12:50:53 +01001998 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001999 }
2000 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
2001 int rem = cur - index;
2002 int rem_bytes = rem << kPointerSizeLog2;
2003 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
2004 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
2005 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
2006 size_in_bytes);
2007 // Distinguish the cases prev < rem < cur and rem <= prev < cur
2008 // to avoid many redundant tests and calls to Insert/RemoveSize.
2009 if (prev < rem) {
2010 // Simple case: insert rem between prev and cur.
2011 finger_ = prev;
2012 free_[prev].next_size_ = rem;
2013 // If this was the last block of size cur, remove the size.
2014 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
2015 free_[rem].next_size_ = free_[cur].next_size_;
2016 } else {
2017 free_[rem].next_size_ = cur;
2018 }
2019 // Add the remainder block.
2020 rem_node->set_size(rem_bytes);
2021 rem_node->set_next(free_[rem].head_node_);
2022 free_[rem].head_node_ = rem_node->address();
2023 } else {
2024 // If this was the last block of size cur, remove the size.
2025 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
2026 finger_ = prev;
2027 free_[prev].next_size_ = free_[cur].next_size_;
2028 }
2029 if (rem_bytes < kMinBlockSize) {
2030 // Too-small remainder is wasted.
2031 rem_node->set_size(rem_bytes);
2032 available_ -= size_in_bytes + rem_bytes;
2033 *wasted_bytes = rem_bytes;
2034 return cur_node;
2035 }
2036 // Add the remainder block and, if needed, insert its size.
2037 rem_node->set_size(rem_bytes);
2038 rem_node->set_next(free_[rem].head_node_);
2039 free_[rem].head_node_ = rem_node->address();
2040 if (rem_node->next() == NULL) InsertSize(rem);
2041 }
2042 available_ -= size_in_bytes;
2043 *wasted_bytes = 0;
2044 return cur_node;
2045}
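
// Worked example for Allocate(), assuming a 32-bit build (kPointerSize == 4)
// and that the 28-byte remainder below is at least kMinBlockSize: a request
// for 12 bytes (index 3) with only a 40-byte block available (index 10) uses
// the first 12 bytes of that block and re-links the trailing 28 bytes onto
// free_[7], updating the size list as described above.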
2046
2047
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002048void OldSpaceFreeList::MarkNodes() {
2049 for (int i = 0; i < kFreeListsLength; i++) {
2050 Address cur_addr = free_[i].head_node_;
2051 while (cur_addr != NULL) {
2052 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
2053 cur_addr = cur_node->next();
2054 cur_node->SetMark();
2055 }
2056 }
2057}
2058
2059
Steve Blocka7e24c12009-10-30 11:49:00 +00002060#ifdef DEBUG
2061bool OldSpaceFreeList::Contains(FreeListNode* node) {
2062 for (int i = 0; i < kFreeListsLength; i++) {
2063 Address cur_addr = free_[i].head_node_;
2064 while (cur_addr != NULL) {
2065 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
2066 if (cur_node == node) return true;
2067 cur_addr = cur_node->next();
2068 }
2069 }
2070 return false;
2071}
2072#endif
2073
2074
2075FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
2076 : owner_(owner), object_size_(object_size) {
2077 Reset();
2078}
2079
2080
2081void FixedSizeFreeList::Reset() {
2082 available_ = 0;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002083 head_ = tail_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00002084}
2085
2086
2087void FixedSizeFreeList::Free(Address start) {
2088#ifdef DEBUG
Leon Clarke4515c472010-02-03 11:58:03 +00002089 MemoryAllocator::ZapBlock(start, object_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002090#endif
Leon Clarkee46be812010-01-19 14:06:41 +00002091 // We only use the freelists with mark-sweep.
2092 ASSERT(!MarkCompactCollector::IsCompacting());
Steve Blocka7e24c12009-10-30 11:49:00 +00002093 FreeListNode* node = FreeListNode::FromAddress(start);
2094 node->set_size(object_size_);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002095 node->set_next(NULL);
2096 if (head_ == NULL) {
2097 tail_ = head_ = node->address();
2098 } else {
2099 FreeListNode::FromAddress(tail_)->set_next(node->address());
2100 tail_ = node->address();
2101 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002102 available_ += object_size_;
2103}
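
// Note: Free() appends at tail_ and Allocate() pops from head_, so the
// fixed-size free list behaves as a FIFO queue; cells are reused in the
// order in which they were freed.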
2104
2105
John Reck59135872010-11-02 12:39:01 -07002106MaybeObject* FixedSizeFreeList::Allocate() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002107 if (head_ == NULL) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002108 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002109 }
2110
2111 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
2112 FreeListNode* node = FreeListNode::FromAddress(head_);
2113 head_ = node->next();
2114 available_ -= object_size_;
2115 return node;
2116}
2117
2118
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002119void FixedSizeFreeList::MarkNodes() {
2120 Address cur_addr = head_;
2121 while (cur_addr != NULL && cur_addr != tail_) {
2122 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
2123 cur_addr = cur_node->next();
2124 cur_node->SetMark();
2125 }
2126}
2127
2128
Steve Blocka7e24c12009-10-30 11:49:00 +00002129// -----------------------------------------------------------------------------
2130// OldSpace implementation
2131
2132void OldSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01002133 // Call prepare of the super class.
2134 PagedSpace::PrepareForMarkCompact(will_compact);
2135
Steve Blocka7e24c12009-10-30 11:49:00 +00002136 if (will_compact) {
2137 // Reset relocation info. During a compacting collection, everything in
2138 // the space is considered 'available' and we will rediscover live data
2139 // and waste during the collection.
2140 MCResetRelocationInfo();
2141 ASSERT(Available() == Capacity());
2142 } else {
2143 // During a non-compacting collection, everything below the linear
2144 // allocation pointer is considered allocated (everything above is
2145 // available) and we will rediscover available and wasted bytes during
2146 // the collection.
2147 accounting_stats_.AllocateBytes(free_list_.available());
2148 accounting_stats_.FillWastedBytes(Waste());
2149 }
2150
2151 // Clear the free list before a full GC---it will be rebuilt afterward.
2152 free_list_.Reset();
2153}
2154
2155
2156void OldSpace::MCCommitRelocationInfo() {
2157 // Update fast allocation info.
2158 allocation_info_.top = mc_forwarding_info_.top;
2159 allocation_info_.limit = mc_forwarding_info_.limit;
2160 ASSERT(allocation_info_.VerifyPagedAllocation());
2161
2162 // The space is compacted and we haven't yet built free lists or
2163 // wasted any space.
2164 ASSERT(Waste() == 0);
2165 ASSERT(AvailableFree() == 0);
2166
2167 // Build the free list for the space.
2168 int computed_size = 0;
2169 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2170 while (it.has_next()) {
2171 Page* p = it.next();
2172 // Space below the relocation pointer is allocated.
Steve Blockd0582a62009-12-15 09:54:21 +00002173 computed_size +=
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002174 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00002175 if (it.has_next()) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002176 // Free the space at the top of the page.
Steve Blockd0582a62009-12-15 09:54:21 +00002177 int extra_size =
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002178 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
Steve Blocka7e24c12009-10-30 11:49:00 +00002179 if (extra_size > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002180 int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
2181 extra_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002182 // The bytes we have just "freed" to add to the free list were
2183 // already accounted as available.
2184 accounting_stats_.WasteBytes(wasted_bytes);
2185 }
2186 }
2187 }
2188
2189 // Make sure the computed size - based on the used portion of the pages in
2190 // use - matches the size obtained while computing forwarding addresses.
2191 ASSERT(computed_size == Size());
2192}
2193
2194
Leon Clarkee46be812010-01-19 14:06:41 +00002195bool NewSpace::ReserveSpace(int bytes) {
2196 // We can't reliably unpack a partial snapshot that needs more new space
2197 // space than the minimum NewSpace size.
2198 ASSERT(bytes <= InitialCapacity());
2199 Address limit = allocation_info_.limit;
2200 Address top = allocation_info_.top;
2201 return limit - top >= bytes;
2202}
2203
2204
Steve Block6ded16b2010-05-10 14:33:55 +01002205void PagedSpace::FreePages(Page* prev, Page* last) {
2206 if (last == AllocationTopPage()) {
2207 // Pages are already at the end of used pages.
2208 return;
2209 }
2210
2211 Page* first = NULL;
2212
2213 // Remove pages from the list.
2214 if (prev == NULL) {
2215 first = first_page_;
2216 first_page_ = last->next_page();
2217 } else {
2218 first = prev->next_page();
2219 MemoryAllocator::SetNextPage(prev, last->next_page());
2220 }
2221
2222 // Attach it after the last page.
2223 MemoryAllocator::SetNextPage(last_page_, first);
2224 last_page_ = last;
2225 MemoryAllocator::SetNextPage(last, NULL);
2226
2227 // Clean them up.
2228 do {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002229 first->InvalidateWatermark(true);
2230 first->SetAllocationWatermark(first->ObjectAreaStart());
2231 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
2232 first->SetRegionMarks(Page::kAllRegionsCleanMarks);
Steve Block6ded16b2010-05-10 14:33:55 +01002233 first = first->next_page();
2234 } while (first != NULL);
2235
2236 // Order of pages in this space might no longer be consistent with
2237 // order of pages in chunks.
2238 page_list_is_chunk_ordered_ = false;
2239}
2240
2241
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002242void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
2243 const bool add_to_freelist = true;
2244
2245 // Mark used and unused pages to properly fill unused pages
2246 // after reordering.
2247 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
2248 Page* last_in_use = AllocationTopPage();
2249 bool in_use = true;
2250
2251 while (all_pages_iterator.has_next()) {
2252 Page* p = all_pages_iterator.next();
2253 p->SetWasInUseBeforeMC(in_use);
2254 if (p == last_in_use) {
2255 // We passed a page containing allocation top. All subsequent
2256 // pages are not used.
2257 in_use = false;
2258 }
2259 }
2260
2261 if (page_list_is_chunk_ordered_) return;
2262
2263 Page* new_last_in_use = Page::FromAddress(NULL);
2264 MemoryAllocator::RelinkPageListInChunkOrder(this,
2265 &first_page_,
2266 &last_page_,
2267 &new_last_in_use);
2268 ASSERT(new_last_in_use->is_valid());
2269
2270 if (new_last_in_use != last_in_use) {
2271 // Current allocation top points to a page which is now in the middle
2272 // of the page list. We should move allocation top forward to the new last
2273 // used page so various object iterators will continue to work properly.
2274 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2275 last_in_use->AllocationTop());
2276
2277 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2278 if (size_in_bytes > 0) {
2279 Address start = last_in_use->AllocationTop();
2280 if (deallocate_blocks) {
2281 accounting_stats_.AllocateBytes(size_in_bytes);
2282 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2283 } else {
2284 Heap::CreateFillerObjectAt(start, size_in_bytes);
2285 }
2286 }
2287
2288 // New last in use page was in the middle of the list before
2289 // sorting, so it is full.
2290 SetTop(new_last_in_use->AllocationTop());
2291
2292 ASSERT(AllocationTopPage() == new_last_in_use);
2293 ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2294 }
2295
2296 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2297 while (pages_in_use_iterator.has_next()) {
2298 Page* p = pages_in_use_iterator.next();
2299 if (!p->WasInUseBeforeMC()) {
2300 // Empty page is in the middle of a sequence of used pages.
2301 // Allocate it as a whole and deallocate immediately.
2302 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2303 p->ObjectAreaStart());
2304
2305 p->SetAllocationWatermark(p->ObjectAreaStart());
2306 Address start = p->ObjectAreaStart();
2307 if (deallocate_blocks) {
2308 accounting_stats_.AllocateBytes(size_in_bytes);
2309 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2310 } else {
2311 Heap::CreateFillerObjectAt(start, size_in_bytes);
2312 }
2313 }
2314 }
2315
2316 page_list_is_chunk_ordered_ = true;
2317}
2318
2319
Steve Block6ded16b2010-05-10 14:33:55 +01002320void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2321 if (will_compact) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002322 RelinkPageListInChunkOrder(false);
Steve Block6ded16b2010-05-10 14:33:55 +01002323 }
2324}
2325
2326
Leon Clarkee46be812010-01-19 14:06:41 +00002327bool PagedSpace::ReserveSpace(int bytes) {
2328 Address limit = allocation_info_.limit;
2329 Address top = allocation_info_.top;
2330 if (limit - top >= bytes) return true;
2331
2332 // There wasn't enough space in the current page. Let's put the rest
2333 // of the page on the free list and start a fresh page.
2334 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2335
2336 Page* reserved_page = TopPageOf(allocation_info_);
2337 int bytes_left_to_reserve = bytes;
2338 while (bytes_left_to_reserve > 0) {
2339 if (!reserved_page->next_page()->is_valid()) {
2340 if (Heap::OldGenerationAllocationLimitReached()) return false;
2341 Expand(reserved_page);
2342 }
2343 bytes_left_to_reserve -= Page::kPageSize;
2344 reserved_page = reserved_page->next_page();
2345 if (!reserved_page->is_valid()) return false;
2346 }
2347 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002348 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
Leon Clarkee46be812010-01-19 14:06:41 +00002349 SetAllocationInfo(&allocation_info_,
2350 TopPageOf(allocation_info_)->next_page());
2351 return true;
2352}
2353
2354
2355// You have to call this last, since the implementation from PagedSpace
2356// doesn't know that memory was 'promised' to large object space.
2357bool LargeObjectSpace::ReserveSpace(int bytes) {
2358 return Heap::OldGenerationSpaceAvailable() >= bytes;
2359}
2360
2361
Steve Blocka7e24c12009-10-30 11:49:00 +00002362// Slow case for normal allocation. Try in order: (1) allocate in the next
2363// page in the space, (2) allocate off the space's free list, (3) expand the
2364// space, (4) fail.
2365HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2366 // Linear allocation in this space has failed. If there is another page
2367 // in the space, move to that page and allocate there. This allocation
2368 // should succeed (size_in_bytes should not be greater than a page's
2369 // object area size).
2370 Page* current_page = TopPageOf(allocation_info_);
2371 if (current_page->next_page()->is_valid()) {
2372 return AllocateInNextPage(current_page, size_in_bytes);
2373 }
2374
Steve Blockd0582a62009-12-15 09:54:21 +00002375 // There is no next page in this space. Try free list allocation unless that
2376 // is currently forbidden.
2377 if (!Heap::linear_allocation()) {
2378 int wasted_bytes;
John Reck59135872010-11-02 12:39:01 -07002379 Object* result;
2380 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
Steve Blockd0582a62009-12-15 09:54:21 +00002381 accounting_stats_.WasteBytes(wasted_bytes);
John Reck59135872010-11-02 12:39:01 -07002382 if (maybe->ToObject(&result)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002383 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002384
2385 HeapObject* obj = HeapObject::cast(result);
2386 Page* p = Page::FromAddress(obj->address());
2387
2388 if (obj->address() >= p->AllocationWatermark()) {
2389 // There should be no hole between the allocation watermark
2390 // and allocated object address.
2391 // Memory above the allocation watermark was not swept and
2392 // might contain garbage pointers to new space.
2393 ASSERT(obj->address() == p->AllocationWatermark());
2394 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2395 }
2396
2397 return obj;
Steve Blockd0582a62009-12-15 09:54:21 +00002398 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002399 }
2400
2401 // Free list allocation failed and there is no next page. Fail if we have
2402 // hit the old generation size limit that should cause a garbage
2403 // collection.
2404 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2405 return NULL;
2406 }
2407
2408 // Try to expand the space and allocate in the new next page.
2409 ASSERT(!current_page->next_page()->is_valid());
2410 if (Expand(current_page)) {
2411 return AllocateInNextPage(current_page, size_in_bytes);
2412 }
2413
2414 // Finally, fail.
2415 return NULL;
2416}
2417
2418
Leon Clarkee46be812010-01-19 14:06:41 +00002419void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002420 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blockd0582a62009-12-15 09:54:21 +00002421 int free_size =
2422 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002423 if (free_size > 0) {
2424 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2425 accounting_stats_.WasteBytes(wasted_bytes);
2426 }
Leon Clarkee46be812010-01-19 14:06:41 +00002427}
2428
2429
2430void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002431 current_page->SetAllocationWatermark(allocation_info_.top);
Leon Clarkee46be812010-01-19 14:06:41 +00002432 int free_size =
2433 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2434 // In the fixed space free list all the free list items have the right size.
2435 // We use up the rest of the page while preserving this invariant.
2436 while (free_size >= object_size_in_bytes_) {
2437 free_list_.Free(allocation_info_.top);
2438 allocation_info_.top += object_size_in_bytes_;
2439 free_size -= object_size_in_bytes_;
2440 accounting_stats_.WasteBytes(object_size_in_bytes_);
2441 }
2442}
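
// Example with hypothetical numbers: for an object_size_in_bytes_ of 32 and
// 100 bytes left on the page, three 32-byte cells are added to the free list
// (each also recorded as wasted bytes) and the final 4 bytes are simply left
// unused at the end of the page.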
2443
2444
2445// Add the block at the top of the page to the space's free list, set the
2446// allocation info to the next page (there is assumed to be one), and allocate
2447// linearly there.
2448HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2449 int size_in_bytes) {
2450 ASSERT(current_page->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002451 Page* next_page = current_page->next_page();
2452 next_page->ClearGCFields();
Leon Clarkee46be812010-01-19 14:06:41 +00002453 PutRestOfCurrentPageOnFreeList(current_page);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002454 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002455 return AllocateLinearly(&allocation_info_, size_in_bytes);
2456}
2457
2458
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002459void OldSpace::DeallocateBlock(Address start,
2460 int size_in_bytes,
2461 bool add_to_freelist) {
2462 Free(start, size_in_bytes, add_to_freelist);
2463}
2464
2465
Steve Blocka7e24c12009-10-30 11:49:00 +00002466#ifdef DEBUG
2467struct CommentStatistic {
2468 const char* comment;
2469 int size;
2470 int count;
2471 void Clear() {
2472 comment = NULL;
2473 size = 0;
2474 count = 0;
2475 }
2476};
2477
2478
2479// Must be small, since a linear iteration is used for lookup.
2480const int kMaxComments = 64;
2481static CommentStatistic comments_statistics[kMaxComments+1];
2482
2483
2484void PagedSpace::ReportCodeStatistics() {
2485 ReportCodeKindStatistics();
2486 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2487 "count (average)\"):\n");
2488 for (int i = 0; i <= kMaxComments; i++) {
2489 const CommentStatistic& cs = comments_statistics[i];
2490 if (cs.size > 0) {
2491 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2492 cs.size/cs.count);
2493 }
2494 }
2495 PrintF("\n");
2496}
2497
2498
2499void PagedSpace::ResetCodeStatistics() {
2500 ClearCodeKindStatistics();
2501 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
2502 comments_statistics[kMaxComments].comment = "Unknown";
2503 comments_statistics[kMaxComments].size = 0;
2504 comments_statistics[kMaxComments].count = 0;
2505}
2506
2507
2508// Adds a comment to the 'comments_statistics' table. Performance is OK as
2509// long as 'kMaxComments' is small.
2510static void EnterComment(const char* comment, int delta) {
2511 // Do not count empty comments
2512 if (delta <= 0) return;
2513 CommentStatistic* cs = &comments_statistics[kMaxComments];
2514 // Search for a free or matching entry in 'comments_statistics': 'cs'
2515 // points to result.
2516 for (int i = 0; i < kMaxComments; i++) {
2517 if (comments_statistics[i].comment == NULL) {
2518 cs = &comments_statistics[i];
2519 cs->comment = comment;
2520 break;
2521 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2522 cs = &comments_statistics[i];
2523 break;
2524 }
2525 }
2526 // Update entry for 'comment'
2527 cs->size += delta;
2528 cs->count += 1;
2529}
2530
2531
2532// Call for each nested comment start (start marked with '[ xxx', end marked
2533// with ']'). RelocIterator 'it' must point to a comment reloc info.
2534static void CollectCommentStatistics(RelocIterator* it) {
2535 ASSERT(!it->done());
2536 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2537 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2538 if (tmp[0] != '[') {
2539 // Not a nested comment; skip
2540 return;
2541 }
2542
2543 // Search for end of nested comment or a new nested comment
2544 const char* const comment_txt =
2545 reinterpret_cast<const char*>(it->rinfo()->data());
2546 const byte* prev_pc = it->rinfo()->pc();
2547 int flat_delta = 0;
2548 it->next();
2549 while (true) {
2550 // All nested comments must be terminated properly, and therefore we always
2551 // exit from this loop.
2552 ASSERT(!it->done());
2553 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2554 const char* const txt =
2555 reinterpret_cast<const char*>(it->rinfo()->data());
Steve Blockd0582a62009-12-15 09:54:21 +00002556 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002557 if (txt[0] == ']') break; // End of nested comment
2558 // A new comment
2559 CollectCommentStatistics(it);
2560 // Skip code that was covered with previous comment
2561 prev_pc = it->rinfo()->pc();
2562 }
2563 it->next();
2564 }
2565 EnterComment(comment_txt, flat_delta);
2566}
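
// Illustrative comment stream (hypothetical comment texts): for the reloc
// comments "[ OuterStub", "[ inlined check", "]", "]" the bytes between the
// inner pair are charged to "inlined check", while "OuterStub" is charged
// only with the bytes outside that nested pair.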
2567
2568
2569// Collects code size statistics:
2570// - by code kind
2571// - by code comment
2572void PagedSpace::CollectCodeStatistics() {
2573 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002574 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002575 if (obj->IsCode()) {
2576 Code* code = Code::cast(obj);
2577 code_kind_statistics[code->kind()] += code->Size();
2578 RelocIterator it(code);
2579 int delta = 0;
2580 const byte* prev_pc = code->instruction_start();
2581 while (!it.done()) {
2582 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
Steve Blockd0582a62009-12-15 09:54:21 +00002583 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002584 CollectCommentStatistics(&it);
2585 prev_pc = it.rinfo()->pc();
2586 }
2587 it.next();
2588 }
2589
2590 ASSERT(code->instruction_start() <= prev_pc &&
Leon Clarkeac952652010-07-15 11:15:24 +01002591 prev_pc <= code->instruction_end());
2592 delta += static_cast<int>(code->instruction_end() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002593 EnterComment("NoComment", delta);
2594 }
2595 }
2596}
2597
2598
2599void OldSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002600 int pct = static_cast<int>(Available() * 100 / Capacity());
2601 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2602 ", waste: %" V8_PTR_PREFIX "d"
2603 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002604 Capacity(), Waste(), Available(), pct);
2605
Steve Blocka7e24c12009-10-30 11:49:00 +00002606 ClearHistograms();
2607 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002608 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2609 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002610 ReportHistogram(true);
2611}
Steve Blocka7e24c12009-10-30 11:49:00 +00002612#endif
2613
2614// -----------------------------------------------------------------------------
2615// FixedSpace implementation
2616
2617void FixedSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01002618 // Call prepare of the super class.
2619 PagedSpace::PrepareForMarkCompact(will_compact);
2620
Steve Blocka7e24c12009-10-30 11:49:00 +00002621 if (will_compact) {
2622 // Reset relocation info.
2623 MCResetRelocationInfo();
2624
2625 // During a compacting collection, everything in the space is considered
2626 // 'available' (set by the call to MCResetRelocationInfo) and we will
2627 // rediscover live and wasted bytes during the collection.
2628 ASSERT(Available() == Capacity());
2629 } else {
2630 // During a non-compacting collection, everything below the linear
2631 // allocation pointer except wasted top-of-page blocks is considered
2632 // allocated and we will rediscover available bytes during the
2633 // collection.
2634 accounting_stats_.AllocateBytes(free_list_.available());
2635 }
2636
2637 // Clear the free list before a full GC---it will be rebuilt afterward.
2638 free_list_.Reset();
2639}
2640
2641
2642void FixedSpace::MCCommitRelocationInfo() {
2643 // Update fast allocation info.
2644 allocation_info_.top = mc_forwarding_info_.top;
2645 allocation_info_.limit = mc_forwarding_info_.limit;
2646 ASSERT(allocation_info_.VerifyPagedAllocation());
2647
2648 // The space is compacted and we haven't yet wasted any space.
2649 ASSERT(Waste() == 0);
2650
2651 // Update allocation_top of each page in use and compute waste.
2652 int computed_size = 0;
2653 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2654 while (it.has_next()) {
2655 Page* page = it.next();
2656 Address page_top = page->AllocationTop();
Steve Blockd0582a62009-12-15 09:54:21 +00002657 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00002658 if (it.has_next()) {
Steve Blockd0582a62009-12-15 09:54:21 +00002659 accounting_stats_.WasteBytes(
2660 static_cast<int>(page->ObjectAreaEnd() - page_top));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002661 page->SetAllocationWatermark(page_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002662 }
2663 }
2664
2665 // Make sure the computed size - based on the used portion of the
2666 // pages in use - matches the size we adjust during allocation.
2667 ASSERT(computed_size == Size());
2668}
2669
2670
2671// Slow case for normal allocation. Try in order: (1) allocate in the next
2672// page in the space, (2) allocate off the space's free list, (3) expand the
2673// space, (4) fail.
2674HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2675 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2676 // Linear allocation in this space has failed. If there is another page
2677 // in the space, move to that page and allocate there. This allocation
2678 // should succeed.
2679 Page* current_page = TopPageOf(allocation_info_);
2680 if (current_page->next_page()->is_valid()) {
2681 return AllocateInNextPage(current_page, size_in_bytes);
2682 }
2683
Steve Blockd0582a62009-12-15 09:54:21 +00002684 // There is no next page in this space. Try free list allocation unless
2685 // that is currently forbidden. The fixed space free list implicitly assumes
2686 // that all free blocks are of the fixed size.
2687 if (!Heap::linear_allocation()) {
John Reck59135872010-11-02 12:39:01 -07002688 Object* result;
2689 MaybeObject* maybe = free_list_.Allocate();
2690 if (maybe->ToObject(&result)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002691 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002692 HeapObject* obj = HeapObject::cast(result);
2693 Page* p = Page::FromAddress(obj->address());
2694
2695 if (obj->address() >= p->AllocationWatermark()) {
2696 // There should be no hole between the allocation watermark
2697 // and allocated object address.
2698 // Memory above the allocation watermark was not swept and
2699 // might contain garbage pointers to new space.
2700 ASSERT(obj->address() == p->AllocationWatermark());
2701 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2702 }
2703
2704 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00002705 }
2706 }
2707
2708 // Free list allocation failed and there is no next page. Fail if we have
2709 // hit the old generation size limit that should cause a garbage
2710 // collection.
2711 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2712 return NULL;
2713 }
2714
2715 // Try to expand the space and allocate in the new next page.
2716 ASSERT(!current_page->next_page()->is_valid());
2717 if (Expand(current_page)) {
2718 return AllocateInNextPage(current_page, size_in_bytes);
2719 }
2720
2721 // Finally, fail.
2722 return NULL;
2723}
2724
2725
2726// Move to the next page (there is assumed to be one) and allocate there.
2727// The top of page block is always wasted, because it is too small to hold a
2728// map.
2729HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2730 int size_in_bytes) {
2731 ASSERT(current_page->next_page()->is_valid());
Steve Block6ded16b2010-05-10 14:33:55 +01002732 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
Steve Blocka7e24c12009-10-30 11:49:00 +00002733 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002734 Page* next_page = current_page->next_page();
2735 next_page->ClearGCFields();
2736 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002737 accounting_stats_.WasteBytes(page_extra_);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002738 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002739 return AllocateLinearly(&allocation_info_, size_in_bytes);
2740}
2741
2742
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002743void FixedSpace::DeallocateBlock(Address start,
2744 int size_in_bytes,
2745 bool add_to_freelist) {
2746 // Free-list elements in fixed space are assumed to have a fixed size.
2747 // We break the free block into chunks and add them to the free list
2748 // individually.
2749 int size = object_size_in_bytes();
2750 ASSERT(size_in_bytes % size == 0);
2751 Address end = start + size_in_bytes;
2752 for (Address a = start; a < end; a += size) {
2753 Free(a, add_to_freelist);
2754 }
2755}
2756
2757
Steve Blocka7e24c12009-10-30 11:49:00 +00002758#ifdef DEBUG
2759void FixedSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002760 int pct = static_cast<int>(Available() * 100 / Capacity());
2761 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2762 ", waste: %" V8_PTR_PREFIX "d"
2763 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002764 Capacity(), Waste(), Available(), pct);
2765
Steve Blocka7e24c12009-10-30 11:49:00 +00002766 ClearHistograms();
2767 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002768 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2769 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002770 ReportHistogram(false);
2771}
Steve Blocka7e24c12009-10-30 11:49:00 +00002772#endif
2773
2774
2775// -----------------------------------------------------------------------------
2776// MapSpace implementation
2777
2778void MapSpace::PrepareForMarkCompact(bool will_compact) {
2779 // Call prepare of the super class.
2780 FixedSpace::PrepareForMarkCompact(will_compact);
2781
2782 if (will_compact) {
2783 // Initialize map index entry.
2784 int page_count = 0;
2785 PageIterator it(this, PageIterator::ALL_PAGES);
2786 while (it.has_next()) {
2787 ASSERT_MAP_PAGE_INDEX(page_count);
2788
2789 Page* p = it.next();
2790 ASSERT(p->mc_page_index == page_count);
2791
2792 page_addresses_[page_count++] = p->address();
2793 }
2794 }
2795}
2796
2797
2798#ifdef DEBUG
2799void MapSpace::VerifyObject(HeapObject* object) {
2800 // The object should be a map or a free-list node.
2801 ASSERT(object->IsMap() || object->IsByteArray());
2802}
2803#endif
2804
2805
2806// -----------------------------------------------------------------------------
2807// GlobalPropertyCellSpace implementation
2808
2809#ifdef DEBUG
2810void CellSpace::VerifyObject(HeapObject* object) {
2811 // The object should be a global object property cell or a free-list node.
2812 ASSERT(object->IsJSGlobalPropertyCell() ||
2813 object->map() == Heap::two_pointer_filler_map());
2814}
2815#endif
2816
2817
2818// -----------------------------------------------------------------------------
2819// LargeObjectIterator
2820
2821LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2822 current_ = space->first_chunk_;
2823 size_func_ = NULL;
2824}
2825
2826
2827LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2828 HeapObjectCallback size_func) {
2829 current_ = space->first_chunk_;
2830 size_func_ = size_func;
2831}
2832
2833
2834HeapObject* LargeObjectIterator::next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002835 if (current_ == NULL) return NULL;
2836
Steve Blocka7e24c12009-10-30 11:49:00 +00002837 HeapObject* object = current_->GetObject();
2838 current_ = current_->next();
2839 return object;
2840}
2841
2842
2843// -----------------------------------------------------------------------------
2844// LargeObjectChunk
2845
2846LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
Steve Blocka7e24c12009-10-30 11:49:00 +00002847 Executability executable) {
2848 size_t requested = ChunkSizeFor(size_in_bytes);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002849 size_t size;
2850 void* mem = MemoryAllocator::AllocateRawMemory(requested, &size, executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002851 if (mem == NULL) return NULL;
Ben Murdochb0fe1622011-05-05 13:52:32 +01002852
2853 // The start of the chunk may be overlaid with a page, so we have to
2854 // make sure that the page flags fit in the size field.
2855 ASSERT((size & Page::kPageFlagMask) == 0);
2856
2857 LOG(NewEvent("LargeObjectChunk", mem, size));
2858 if (size < requested) {
2859 MemoryAllocator::FreeRawMemory(mem, size, executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002860 LOG(DeleteEvent("LargeObjectChunk", mem));
2861 return NULL;
2862 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002863
2864 ObjectSpace space = (executable == EXECUTABLE)
2865 ? kObjectSpaceCodeSpace
2866 : kObjectSpaceLoSpace;
2867 MemoryAllocator::PerformAllocationCallback(
2868 space, kAllocationActionAllocate, size);
2869
2870 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2871 chunk->size_ = size;
2872 return chunk;
Steve Blocka7e24c12009-10-30 11:49:00 +00002873}
2874
2875
2876int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
Steve Blockd0582a62009-12-15 09:54:21 +00002877 int os_alignment = static_cast<int>(OS::AllocateAlignment());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002878 if (os_alignment < Page::kPageSize) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002879 size_in_bytes += (Page::kPageSize - os_alignment);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002880 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002881 return size_in_bytes + Page::kObjectStartOffset;
2882}
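
// Illustrative sizing, assuming OS::AllocateAlignment() is 4 KB and
// Page::kPageSize is 8 KB: a request for N bytes becomes
// N + 4096 + Page::kObjectStartOffset, which leaves room to round the chunk
// start up to a page boundary and still fit the object in the page's object
// area.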
2883
2884// -----------------------------------------------------------------------------
2885// LargeObjectSpace
2886
2887LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
2888 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2889 first_chunk_(NULL),
2890 size_(0),
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002891 page_count_(0),
2892 objects_size_(0) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002893
2894
2895bool LargeObjectSpace::Setup() {
2896 first_chunk_ = NULL;
2897 size_ = 0;
2898 page_count_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002899 objects_size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002900 return true;
2901}
2902
2903
2904void LargeObjectSpace::TearDown() {
2905 while (first_chunk_ != NULL) {
2906 LargeObjectChunk* chunk = first_chunk_;
2907 first_chunk_ = first_chunk_->next();
2908 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
Steve Block791712a2010-08-27 10:21:07 +01002909 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2910 Executability executable =
2911 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Iain Merrick9ac36c92010-09-13 15:29:50 +01002912 ObjectSpace space = kObjectSpaceLoSpace;
2913 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2914 size_t size = chunk->size();
2915 MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
2916 MemoryAllocator::PerformAllocationCallback(
2917 space, kAllocationActionFree, size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002918 }
2919
2920 size_ = 0;
2921 page_count_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002922 objects_size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002923}
2924
2925
2926#ifdef ENABLE_HEAP_PROTECTION
2927
2928void LargeObjectSpace::Protect() {
2929 LargeObjectChunk* chunk = first_chunk_;
2930 while (chunk != NULL) {
2931 MemoryAllocator::Protect(chunk->address(), chunk->size());
2932 chunk = chunk->next();
2933 }
2934}
2935
2936
2937void LargeObjectSpace::Unprotect() {
2938 LargeObjectChunk* chunk = first_chunk_;
2939 while (chunk != NULL) {
2940 bool is_code = chunk->GetObject()->IsCode();
2941 MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
2942 is_code ? EXECUTABLE : NOT_EXECUTABLE);
2943 chunk = chunk->next();
2944 }
2945}
2946
2947#endif
2948
2949
MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                                   int object_size,
                                                   Executability executable) {
  ASSERT(0 < object_size && object_size <= requested_size);

  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(identity());
  }

  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
  if (chunk == NULL) {
    return Failure::RetryAfterGC(identity());
  }

  size_ += static_cast<int>(chunk->size());
  objects_size_ += requested_size;
  page_count_++;
  chunk->set_next(first_chunk_);
  first_chunk_ = chunk;

  // Initialize page header.
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  Address object_address = page->ObjectAreaStart();

  // Clear the low order bit of the second word in the page to flag it as a
  // large object page.  If the chunk_size happened to be written there, its
  // low order bit should already be clear.
  page->SetIsLargeObjectPage(true);
  page->SetIsPageExecutable(executable);
  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
  return HeapObject::FromAddress(object_address);
}

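// Allocates a large, executable chunk for a Code object.  A minimal usage
// sketch (hypothetical call site; the real callers live in heap.cc):
//
//   Object* result;
//   { MaybeObject* maybe = Heap::lo_space()->AllocateRawCode(size_in_bytes);
//     if (!maybe->ToObject(&result)) return maybe;
//   }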
MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             EXECUTABLE);
}

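// Allocates a large FixedArray.  In this version the request is identical to
// AllocateRaw: a non-executable chunk of size_in_bytes.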
MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}

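// Allocates any other large, non-executable object, such as a sequential
// string or a byte array.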
MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


// GC support
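// Maps an arbitrary address to the large object containing it, or returns an
// exception failure if the address is not part of this space.  The lookup is
// a linear walk over the chunk list.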
MaybeObject* LargeObjectSpace::FindObject(Address a) {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= a && a < chunk_address + chunk->size()) {
      return chunk->GetObject();
    }
  }
  return Failure::Exception();
}

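// Linear walk like FindObject, but returns the chunk itself; intended for
// mapping a pc back to the chunk it points into (see the TODO below).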
LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
  // TODO(853): Change this implementation to only find executable
  // chunks and use some kind of hash-based approach to speed it up.
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
      return chunk;
    }
  }
  return NULL;
}

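// Visits the dirty regions of large FixedArrays and passes any new-space
// pointers found there to copy_object.  A large page shares a single 32-bit
// region-marks word across all of the Page::kPageSize-sized slices it spans,
// so the object is processed slice by slice.  Because the page header
// occupies the start of the first page, the first slice is shorter than a
// full page and its marks are shifted by the region number of the object's
// start; every following full slice reuses the marks word as-is, a tail
// shorter than a page is handled last, and the accumulated newmarks are
// written back at the end.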
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
  LargeObjectIterator it(this);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    if (object->IsFixedArray()) {
      Page* page = Page::FromAddress(object->address());
      uint32_t marks = page->GetRegionMarks();
      uint32_t newmarks = Page::kAllRegionsCleanMarks;

      if (marks != Page::kAllRegionsCleanMarks) {
        // For a large page a single dirty mark corresponds to several
        // regions (modulo 32).  So we treat a large page as a sequence of
        // normal pages of size Page::kPageSize that share the same dirty
        // marks, and iterate the dirty regions of each of these pages in turn.
        Address start = object->address();
        Address end = page->ObjectAreaEnd();
        Address object_end = start + object->Size();

        // Iterate the regions of the first normal page covering the object.
        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
        newmarks |=
            Heap::IterateDirtyRegions(marks >> first_region_number,
                                      start,
                                      end,
                                      &Heap::IteratePointersInDirtyRegion,
                                      copy_object) << first_region_number;

        start = end;
        end = start + Page::kPageSize;
        while (end <= object_end) {
          // Iterate next 32 regions.
          newmarks |=
              Heap::IterateDirtyRegions(marks,
                                        start,
                                        end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object);
          start = end;
          end = start + Page::kPageSize;
        }

        if (start != object_end) {
          // Iterate the last piece of an object which is less than
          // Page::kPageSize.
          newmarks |=
              Heap::IterateDirtyRegions(marks,
                                        start,
                                        object_end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object);
        }

        page->SetRegionMarks(newmarks);
      }
    }
  }
}

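// Sweep support: called after marking.  Live objects only get their mark bit
// cleared; unmarked objects have their chunk unlinked from the list, the
// space's accounting reverted, and the underlying memory handed back to the
// MemoryAllocator.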
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      Page* page = Page::FromAddress(RoundUp(current->address(),
                                             Page::kPageSize));
      Executability executable =
          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      MarkCompactCollector::ReportDeleteIfNeeded(object);
      size_ -= static_cast<int>(chunk_size);
      objects_size_ -= object->Size();
      page_count_--;
      ObjectSpace space = kObjectSpaceLoSpace;
      if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
      // Report the freed chunk's size (not the space's running total) to the
      // allocation callbacks, matching the TearDown path above.
      MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
                                                 chunk_size);
      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}

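// Returns true if the object lives on a large object page.  Addresses in new
// space are rejected up front; everything else is decided by the page's
// large-object flag.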
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  if (Heap::new_space()->Contains(address)) {
    return false;
  }
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}

#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves rather than using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRegionDirty.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(Heap::Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (Heap::InNewSpace(element_object)) {
            Address array_addr = object->address();
            Address element_addr = array_addr + FixedArray::kHeaderSize +
                j * kPointerSize;

            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
          }
        }
      }
    }
  }
}

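// Debug printing: dumps every object in the space.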
void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    obj->Print();
  }
}

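// Debug statistics: prints the total chunk size of the space, the number and
// aggregate size of its objects, and a per-type histogram.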
void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}

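// Adds the size of every Code object in this space to the global
// code_kind_statistics table, indexed by code kind.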
void LargeObjectSpace::CollectCodeStatistics() {
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal