// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "liveobjectlist-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())

intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


HeapObject* HeapObjectIterator::FromNextPage() {
  if (cur_addr_ == end_addr_) return NULL;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  if (cur_addr_ == end_addr_) return NULL;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return FromCurrentPage();
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif
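
// A minimal usage sketch for the iterator above (illustrative only, assuming
// the HeapObject* next() accessor declared for this iterator in spaces.h):
//
//   HeapObjectIterator it(Heap::old_pointer_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... examine obj ...
//   }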


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}


// -----------------------------------------------------------------------------
// CodeRange

List<CodeRange::FreeBlock> CodeRange::free_list_(0);
List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
int CodeRange::current_allocation_block_index_ = 0;
VirtualMemory* CodeRange::code_range_ = NULL;


bool CodeRange::Setup(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(NewEvent("CodeRange", code_range_->address(), requested));
  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


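// Advance to an allocation block that can hold |requested| bytes. The search
// first scans forward through allocation_list_; if nothing fits, the free
// list is merged back into allocation_list_ (coalescing adjacent blocks) and
// the scan is retried from the start. Failure to find a block is treated as
// an out-of-memory condition.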
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}



void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough. This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//
intptr_t MemoryAllocator::capacity_ = 0;
intptr_t MemoryAllocator::capacity_executable_ = 0;
intptr_t MemoryAllocator::size_ = 0;
intptr_t MemoryAllocator::size_executable_ = 0;

List<MemoryAllocator::MemoryAllocationCallbackRegistration>
  MemoryAllocator::memory_allocation_callbacks_;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


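// Unused chunk ids are kept on a simple LIFO stack stored in free_chunk_ids_;
// top_ is the number of ids currently on the stack.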
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  // Over-estimate the size of chunks_ array. It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ =
      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  size_executable_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


bool MemoryAllocator::SafeIsInAPageChunk(Address addr) {
  return InInitialChunk(addr) || InAllocatedChunks(addr);
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  FreeChunkTables(&chunk_table_[0],
                  kChunkTableTopLevelEntries,
                  kChunkTableLevels);

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  capacity_executable_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void MemoryAllocator::FreeChunkTables(uintptr_t* array, int len, int level) {
  for (int i = 0; i < len; i++) {
    if (array[i] != kUnusedChunkTableEntry) {
      uintptr_t* subarray = reinterpret_cast<uintptr_t*>(array[i]);
      if (level > 1) {
        array[i] = kUnusedChunkTableEntry;
        FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1);
      } else {
        array[i] = kUnusedChunkTableEntry;
      }
      delete[] subarray;
    }
  }
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
    return NULL;
  }

  void* mem;
  if (executable == EXECUTABLE) {
    // Check executable memory limit.
    if (size_executable_ + requested >
        static_cast<size_t>(capacity_executable_)) {
      LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }
    // Allocate executable memory either from code range or from the
    // OS.
    if (CodeRange::exists()) {
      mem = CodeRange::AllocateRawMemory(requested, allocated);
    } else {
      mem = OS::Allocate(requested, allocated, true);
    }
    // Update executable memory size.
    size_executable_ += static_cast<int>(*allocated);
  } else {
    mem = OS::Allocate(requested, allocated, false);
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;

#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem,
                                    size_t length,
                                    Executability executable) {
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
  if (CodeRange::contains(static_cast<Address>(mem))) {
    CodeRange::FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);

  ASSERT(size_ >= 0);
  ASSERT(size_executable_ >= 0);
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size. Page::kPageSize is a power of two so we can divide by
  // shifting.
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}
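
// Note that when |start| is not page aligned the chunk holds one page fewer
// than size / Page::kPageSize; callers such as AllocatePages below tolerate
// 'losing' a page to alignment for this reason.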


Page* MemoryAllocator::AllocatePages(int requested_pages,
                                     int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  // We may 'lose' a page due to alignment.
  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size, owner->executable());
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);

  AddToAllocatedChunks(static_cast<Address>(chunk), chunk_size);

  return new_pages;
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  Counters::memory_allocated.Decrement(static_cast<int>(size));
  return true;
}


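// Fill a block with the kZapValue pattern. This is only reached from DEBUG
// builds (see the #ifdef DEBUG callers above), so that reads from freed or
// freshly committed memory are easy to recognize.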
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


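// Lay out Page headers for every page in the chunk. Each page's
// opaque_header packs the address of the next page together with the chunk
// id in the low bits (page addresses are page aligned, so the bits do not
// overlap); the last page's next-page link is 0.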
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
        ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->InvalidateWatermark(true);
    p->SetIsLargeObjectPage(false);
    p->SetAllocationWatermark(p->ObjectAreaStart());
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'prev'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::FreeAllPages(PagedSpace* space) {
  for (int i = 0, length = chunks_.length(); i < length; i++) {
    if (chunks_[i].owner() == space) {
      DeleteChunk(i);
    }
  }
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory. Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
  } else {
    RemoveFromAllocatedChunks(c.address(), c.size());
    LOG(DeleteEvent("PagedChunk", c.address()));
    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
    size_t size = c.size();
    FreeRawMemory(c.address(), size, c.executable());
    PerformAllocationCallback(space, kAllocationActionFree, size);
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
                                                 Page** first_page,
                                                 Page** last_page,
                                                 Page** last_page_in_use) {
  Page* first = NULL;
  Page* last = NULL;

  for (int i = 0, length = chunks_.length(); i < length; i++) {
    ChunkInfo& chunk = chunks_[i];

    if (chunk.owner() == space) {
      if (first == NULL) {
        Address low = RoundUp(chunk.address(), Page::kPageSize);
        first = Page::FromAddress(low);
      }
      last = RelinkPagesInChunk(i,
                                chunk.address(),
                                chunk.size(),
                                last,
                                last_page_in_use);
    }
  }

  if (first_page != NULL) {
    *first_page = first;
  }

  if (last_page != NULL) {
    *last_page = last;
  }
}


Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    p->InvalidateWatermark(true);
    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}


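// The functions below maintain a sparse, multi-level lookup table of all
// chunks allocated outside the initial chunk. Each chunk is registered
// under the fine-grained table index of both its first and its last byte,
// so InAllocatedChunks(addr) can answer a containment query by checking at
// most two entries for the index derived from addr.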
void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) {
  ASSERT(size == kChunkSize);
  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
  AddChunkUsingAddress(int_address, int_address);
  AddChunkUsingAddress(int_address, int_address + size - 1);
}


void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start,
                                           uintptr_t chunk_index_base) {
  uintptr_t* fine_grained = AllocatedChunksFinder(
      chunk_table_,
      chunk_index_base,
      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
      kCreateTablesAsNeeded);
  int index = FineGrainedIndexForAddress(chunk_index_base);
  if (fine_grained[index] != kUnusedChunkTableEntry) index++;
  ASSERT(fine_grained[index] == kUnusedChunkTableEntry);
  fine_grained[index] = chunk_start;
}


void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) {
  ASSERT(size == kChunkSize);
  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
  RemoveChunkFoundUsingAddress(int_address, int_address);
  RemoveChunkFoundUsingAddress(int_address, int_address + size - 1);
}


void MemoryAllocator::RemoveChunkFoundUsingAddress(
    uintptr_t chunk_start,
    uintptr_t chunk_index_base) {
  uintptr_t* fine_grained = AllocatedChunksFinder(
      chunk_table_,
      chunk_index_base,
      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
      kDontCreateTables);
  // Can't remove an entry that's not there.
  ASSERT(fine_grained != kUnusedChunkTableEntry);
  int index = FineGrainedIndexForAddress(chunk_index_base);
  ASSERT(fine_grained[index] != kUnusedChunkTableEntry);
  if (fine_grained[index] != chunk_start) {
    index++;
    ASSERT(fine_grained[index] == chunk_start);
    fine_grained[index] = kUnusedChunkTableEntry;
  } else {
    // If only one of the entries is used it must be the first, since
    // InAllocatedChunks relies on that. Move things around so that this is
    // the case.
    fine_grained[index] = fine_grained[index + 1];
    fine_grained[index + 1] = kUnusedChunkTableEntry;
  }
}


bool MemoryAllocator::InAllocatedChunks(Address addr) {
  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
  uintptr_t* fine_grained = AllocatedChunksFinder(
      chunk_table_,
      int_address,
      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
      kDontCreateTables);
  if (fine_grained == NULL) return false;
  int index = FineGrainedIndexForAddress(int_address);
  if (fine_grained[index] == kUnusedChunkTableEntry) return false;
  uintptr_t entry = fine_grained[index];
  if (entry <= int_address && entry + kChunkSize > int_address) return true;
  index++;
  if (fine_grained[index] == kUnusedChunkTableEntry) return false;
  entry = fine_grained[index];
  if (entry <= int_address && entry + kChunkSize > int_address) return true;
  return false;
}


uintptr_t* MemoryAllocator::AllocatedChunksFinder(
    uintptr_t* table,
    uintptr_t address,
    int bit_position,
    CreateTables create_as_needed) {
  if (bit_position == kChunkSizeLog2) {
    return table;
  }
  ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel);
  int index =
      ((address >> bit_position) &
       ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1));
  uintptr_t more_fine_grained_address =
      address & ((V8_INTPTR_C(1) << bit_position) - 1);
  ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) ||
         (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel));
  uintptr_t* more_fine_grained_table =
      reinterpret_cast<uintptr_t*>(table[index]);
  if (more_fine_grained_table == kUnusedChunkTableEntry) {
    if (create_as_needed == kDontCreateTables) return NULL;
    int words_needed = 1 << kChunkTableBitsPerLevel;
    if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) {
      words_needed =
          (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry;
    }
    more_fine_grained_table = new uintptr_t[words_needed];
    for (int i = 0; i < words_needed; i++) {
      more_fine_grained_table[i] = kUnusedChunkTableEntry;
    }
    table[index] = reinterpret_cast<uintptr_t>(more_fine_grained_table);
  }
  return AllocatedChunksFinder(
      more_fine_grained_table,
      more_fine_grained_address,
      bit_position - kChunkTableBitsPerLevel,
      create_as_needed);
}


uintptr_t MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries];


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us. If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages =
        Min(MemoryAllocator::kPagesPerChunk,
            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially clear region marks in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  MemoryAllocator::FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::MarkAllPagesClean() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
  }
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'. We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page in the page, MCSpaceOffsetForAddress considers it is in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to previous page
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection. This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it. Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page. The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
  current_page->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages =
      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
  // We don't want to have to handle small chunks near the end so if there are
  // not kPagesPerChunk pages available without exceeding the max capacity then
  // act as if memory has run out.
  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear region marks of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  if (!page_list_is_chunk_ordered_) {
    // We can't shrink the space if the page list is not chunk-ordered
    // (see comment for class MemoryAllocator for definition).
    return;
  }

  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap, and the
        // page regions covering intergenerational references should be
        // marked dirty.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


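// Commit and Uncommit toggle whether the semispace's reserved capacity_ bytes
// are backed by committed memory; committed_ records the current state.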
1521bool SemiSpace::Commit() {
1522 ASSERT(!is_committed());
1523 if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
1524 return false;
1525 }
1526 committed_ = true;
1527 return true;
1528}
1529
1530
1531bool SemiSpace::Uncommit() {
1532 ASSERT(is_committed());
1533 if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
1534 return false;
1535 }
1536 committed_ = false;
1537 return true;
1538}
1539
1540
1541// -----------------------------------------------------------------------------
1542// SemiSpace implementation
1543
1544bool SemiSpace::Setup(Address start,
1545 int initial_capacity,
1546 int maximum_capacity) {
1547 // Creates a space in the young generation. The constructor does not
1548 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1549 // memory of size 'capacity' when set up, and does not grow or shrink
1550 // otherwise. In the mark-compact collector, the memory region of the from
1551 // space is used as the marking stack. It requires contiguous memory
1552 // addresses.
1553 initial_capacity_ = initial_capacity;
1554 capacity_ = initial_capacity;
1555 maximum_capacity_ = maximum_capacity;
1556 committed_ = false;
1557
1558 start_ = start;
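  // The mask fields below allow a fast range check: assuming 'start' is
  // aligned to 'maximum_capacity' (a power of two), an address lies in this
  // semispace iff (address & address_mask_) == start_, and a tagged heap
  // object pointer lies here iff (pointer & object_mask_) == object_expected_.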
1559 address_mask_ = ~(maximum_capacity - 1);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001560 object_mask_ = address_mask_ | kHeapObjectTagMask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001561 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1562 age_mark_ = start_;
1563
1564 return Commit();
1565}
1566
1567
1568void SemiSpace::TearDown() {
1569 start_ = NULL;
1570 capacity_ = 0;
1571}
1572
1573
1574bool SemiSpace::Grow() {
1575 // Double the semispace size but only up to maximum capacity.
1576 int maximum_extra = maximum_capacity_ - capacity_;
Steve Blockd0582a62009-12-15 09:54:21 +00001577 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
Steve Blocka7e24c12009-10-30 11:49:00 +00001578 maximum_extra);
1579 if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
1580 return false;
1581 }
1582 capacity_ += extra;
1583 return true;
1584}
1585
1586
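// Grow the committed part of the semispace to exactly 'new_capacity' by
// committing the extra memory at the current high end. The caller must pass a
// capacity that is larger than the current one, no larger than the maximum,
// and aligned to the OS allocation granularity.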
1587bool SemiSpace::GrowTo(int new_capacity) {
1588 ASSERT(new_capacity <= maximum_capacity_);
1589 ASSERT(new_capacity > capacity_);
1590 size_t delta = new_capacity - capacity_;
1591 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1592 if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
1593 return false;
1594 }
1595 capacity_ = new_capacity;
1596 return true;
1597}
1598
1599
1600bool SemiSpace::ShrinkTo(int new_capacity) {
1601 ASSERT(new_capacity >= initial_capacity_);
1602 ASSERT(new_capacity < capacity_);
1603 size_t delta = capacity_ - new_capacity;
1604 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1605 if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
1606 return false;
1607 }
1608 capacity_ = new_capacity;
1609 return true;
1610}
1611
1612
1613#ifdef DEBUG
1614void SemiSpace::Print() { }
1615
1616
1617void SemiSpace::Verify() { }
1618#endif
1619
1620
1621// -----------------------------------------------------------------------------
1622// SemiSpaceIterator implementation.
1623SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1624 Initialize(space, space->bottom(), space->top(), NULL);
1625}
1626
1627
1628SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1629 HeapObjectCallback size_func) {
1630 Initialize(space, space->bottom(), space->top(), size_func);
1631}
1632
1633
1634SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1635 Initialize(space, start, space->top(), NULL);
1636}
1637
1638
1639void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
1640 Address end,
1641 HeapObjectCallback size_func) {
1642 ASSERT(space->ToSpaceContains(start));
1643 ASSERT(space->ToSpaceLow() <= end
1644 && end <= space->ToSpaceHigh());
1645 space_ = &space->to_space_;
1646 current_ = start;
1647 limit_ = end;
1648 size_func_ = size_func;
1649}
1650
1651
1652#ifdef DEBUG
1653// A static array of histogram info for each type.
1654static HistogramInfo heap_histograms[LAST_TYPE+1];
1655static JSObject::SpillInformation js_spill_information;
1656
1657// heap_histograms is shared; always clear it before using it.
1658static void ClearHistograms() {
1659 // We reset the name each time, though it hasn't changed.
1660#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
1661 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1662#undef DEF_TYPE_NAME
1663
1664#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
1665 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1666#undef CLEAR_HISTOGRAM
1667
1668 js_spill_information.Clear();
1669}
1670
1671
1672static int code_kind_statistics[Code::NUMBER_OF_KINDS];
1673
1674
1675static void ClearCodeKindStatistics() {
1676 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1677 code_kind_statistics[i] = 0;
1678 }
1679}
1680
1681
1682static void ReportCodeKindStatistics() {
Steve Block6ded16b2010-05-10 14:33:55 +01001683 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
Steve Blocka7e24c12009-10-30 11:49:00 +00001684
1685#define CASE(name) \
1686 case Code::name: table[Code::name] = #name; \
1687 break
1688
1689 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1690 switch (static_cast<Code::Kind>(i)) {
1691 CASE(FUNCTION);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001692 CASE(OPTIMIZED_FUNCTION);
Steve Blocka7e24c12009-10-30 11:49:00 +00001693 CASE(STUB);
1694 CASE(BUILTIN);
1695 CASE(LOAD_IC);
1696 CASE(KEYED_LOAD_IC);
1697 CASE(STORE_IC);
1698 CASE(KEYED_STORE_IC);
1699 CASE(CALL_IC);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001700 CASE(KEYED_CALL_IC);
Steve Block6ded16b2010-05-10 14:33:55 +01001701 CASE(BINARY_OP_IC);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001702 CASE(TYPE_RECORDING_BINARY_OP_IC);
1703 CASE(COMPARE_IC);
Steve Blocka7e24c12009-10-30 11:49:00 +00001704 }
1705 }
1706
1707#undef CASE
1708
1709 PrintF("\n Code kind histograms: \n");
1710 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1711 if (code_kind_statistics[i] > 0) {
1712 PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
1713 }
1714 }
1715 PrintF("\n");
1716}
1717
1718
1719static int CollectHistogramInfo(HeapObject* obj) {
1720 InstanceType type = obj->map()->instance_type();
1721 ASSERT(0 <= type && type <= LAST_TYPE);
1722 ASSERT(heap_histograms[type].name() != NULL);
1723 heap_histograms[type].increment_number(1);
1724 heap_histograms[type].increment_bytes(obj->Size());
1725
1726 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1727 JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
1728 }
1729
1730 return obj->Size();
1731}
1732
1733
1734static void ReportHistogram(bool print_spill) {
1735 PrintF("\n Object Histogram:\n");
1736 for (int i = 0; i <= LAST_TYPE; i++) {
1737 if (heap_histograms[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001738 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001739 heap_histograms[i].name(),
1740 heap_histograms[i].number(),
1741 heap_histograms[i].bytes());
1742 }
1743 }
1744 PrintF("\n");
1745
1746 // Summarize string types.
1747 int string_number = 0;
1748 int string_bytes = 0;
1749#define INCREMENT(type, size, name, camel_name) \
1750 string_number += heap_histograms[type].number(); \
1751 string_bytes += heap_histograms[type].bytes();
1752 STRING_TYPE_LIST(INCREMENT)
1753#undef INCREMENT
1754 if (string_number > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001755 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
Steve Blocka7e24c12009-10-30 11:49:00 +00001756 string_bytes);
1757 }
1758
1759 if (FLAG_collect_heap_spill_statistics && print_spill) {
1760 js_spill_information.Print();
1761 }
1762}
1763#endif // DEBUG
1764
1765
1766// Support for statistics gathering for --heap-stats and --log-gc.
1767#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1768void NewSpace::ClearHistograms() {
1769 for (int i = 0; i <= LAST_TYPE; i++) {
1770 allocated_histogram_[i].clear();
1771 promoted_histogram_[i].clear();
1772 }
1773}
1774
1775// Because the copying collector does not touch garbage objects, we iterate
1776// the new space before a collection to get a histogram of allocated objects.
1777// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1778// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1779// flag is set.
1780void NewSpace::CollectStatistics() {
1781 ClearHistograms();
1782 SemiSpaceIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00001783 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1784 RecordAllocation(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001785}
1786
1787
1788#ifdef ENABLE_LOGGING_AND_PROFILING
1789static void DoReportStatistics(HistogramInfo* info, const char* description) {
1790 LOG(HeapSampleBeginEvent("NewSpace", description));
1791 // Lump all the string types together.
1792 int string_number = 0;
1793 int string_bytes = 0;
1794#define INCREMENT(type, size, name, camel_name) \
1795 string_number += info[type].number(); \
1796 string_bytes += info[type].bytes();
1797 STRING_TYPE_LIST(INCREMENT)
1798#undef INCREMENT
1799 if (string_number > 0) {
1800 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1801 }
1802
1803 // Then do the other types.
1804 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1805 if (info[i].number() > 0) {
1806 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
1807 info[i].bytes()));
1808 }
1809 }
1810 LOG(HeapSampleEndEvent("NewSpace", description));
1811}
1812#endif // ENABLE_LOGGING_AND_PROFILING
1813
1814
1815void NewSpace::ReportStatistics() {
1816#ifdef DEBUG
1817 if (FLAG_heap_stats) {
1818 float pct = static_cast<float>(Available()) / Capacity();
Ben Murdochf87a2032010-10-22 12:50:53 +01001819 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1820 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001821 Capacity(), Available(), static_cast<int>(pct*100));
1822 PrintF("\n Object Histogram:\n");
1823 for (int i = 0; i <= LAST_TYPE; i++) {
1824 if (allocated_histogram_[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001825 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001826 allocated_histogram_[i].name(),
1827 allocated_histogram_[i].number(),
1828 allocated_histogram_[i].bytes());
1829 }
1830 }
1831 PrintF("\n");
1832 }
1833#endif // DEBUG
1834
1835#ifdef ENABLE_LOGGING_AND_PROFILING
1836 if (FLAG_log_gc) {
1837 DoReportStatistics(allocated_histogram_, "allocated");
1838 DoReportStatistics(promoted_histogram_, "promoted");
1839 }
1840#endif // ENABLE_LOGGING_AND_PROFILING
1841}
1842
1843
1844void NewSpace::RecordAllocation(HeapObject* obj) {
1845 InstanceType type = obj->map()->instance_type();
1846 ASSERT(0 <= type && type <= LAST_TYPE);
1847 allocated_histogram_[type].increment_number(1);
1848 allocated_histogram_[type].increment_bytes(obj->Size());
1849}
1850
1851
1852void NewSpace::RecordPromotion(HeapObject* obj) {
1853 InstanceType type = obj->map()->instance_type();
1854 ASSERT(0 <= type && type <= LAST_TYPE);
1855 promoted_histogram_[type].increment_number(1);
1856 promoted_histogram_[type].increment_bytes(obj->Size());
1857}
1858#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1859
1860
1861// -----------------------------------------------------------------------------
1862// Free lists for old object spaces implementation
1863
1864void FreeListNode::set_size(int size_in_bytes) {
1865 ASSERT(size_in_bytes > 0);
1866 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1867
1868 // We write a map and possibly size information to the block. If the block
1869 // is big enough to be a ByteArray with at least one extra word (the next
1870 // pointer), we set its map to be the byte array map and its size to an
1871 // appropriate array length for the desired size from HeapObject::Size().
1872 // If the block is too small (e.g., one or two words) to hold both a size
1873 // field and a next pointer, we give it a filler map that gives it the
1874 // correct size.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001875 if (size_in_bytes > ByteArray::kHeaderSize) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001876 set_map(Heap::raw_unchecked_byte_array_map());
Steve Blockd0582a62009-12-15 09:54:21 +00001877 // Can't use ByteArray::cast because it fails during deserialization.
1878 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1879 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00001880 } else if (size_in_bytes == kPointerSize) {
1881 set_map(Heap::raw_unchecked_one_pointer_filler_map());
1882 } else if (size_in_bytes == 2 * kPointerSize) {
1883 set_map(Heap::raw_unchecked_two_pointer_filler_map());
1884 } else {
1885 UNREACHABLE();
1886 }
Steve Blockd0582a62009-12-15 09:54:21 +00001887 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1888 // deserialization because the byte array map is not done yet.
Steve Blocka7e24c12009-10-30 11:49:00 +00001889}
1890
1891
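// The next pointer of a free block lives at a different offset depending on
// the map chosen in set_size(): blocks laid out as byte arrays keep it at
// kNextOffset (past the map and length words), while filler blocks keep it in
// the word right after the map.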
1892Address FreeListNode::next() {
Steve Block3ce2e202009-11-05 08:53:23 +00001893 ASSERT(IsFreeListNode(this));
Steve Blocka7e24c12009-10-30 11:49:00 +00001894 if (map() == Heap::raw_unchecked_byte_array_map()) {
1895 ASSERT(Size() >= kNextOffset + kPointerSize);
1896 return Memory::Address_at(address() + kNextOffset);
1897 } else {
1898 return Memory::Address_at(address() + kPointerSize);
1899 }
1900}
1901
1902
1903void FreeListNode::set_next(Address next) {
Steve Block3ce2e202009-11-05 08:53:23 +00001904 ASSERT(IsFreeListNode(this));
Steve Blocka7e24c12009-10-30 11:49:00 +00001905 if (map() == Heap::raw_unchecked_byte_array_map()) {
1906 ASSERT(Size() >= kNextOffset + kPointerSize);
1907 Memory::Address_at(address() + kNextOffset) = next;
1908 } else {
1909 Memory::Address_at(address() + kPointerSize) = next;
1910 }
1911}
1912
1913
1914OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
1915 Reset();
1916}
1917
1918
1919void OldSpaceFreeList::Reset() {
1920 available_ = 0;
1921 for (int i = 0; i < kFreeListsLength; i++) {
1922 free_[i].head_node_ = NULL;
1923 }
1924 needs_rebuild_ = false;
1925 finger_ = kHead;
1926 free_[kHead].next_size_ = kEnd;
1927}
1928
1929
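// The free lists are segregated by exact size: free_[i] holds blocks of
// i * kPointerSize bytes. To avoid scanning empty buckets during best-fit
// allocation, the non-empty buckets are additionally chained in ascending
// order through their next_size_ fields; this "size list" is rebuilt lazily
// here after Free() has invalidated it.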
1930void OldSpaceFreeList::RebuildSizeList() {
1931 ASSERT(needs_rebuild_);
1932 int cur = kHead;
1933 for (int i = cur + 1; i < kFreeListsLength; i++) {
1934 if (free_[i].head_node_ != NULL) {
1935 free_[cur].next_size_ = i;
1936 cur = i;
1937 }
1938 }
1939 free_[cur].next_size_ = kEnd;
1940 needs_rebuild_ = false;
1941}
1942
1943
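// Return a block of memory to the free list. Blocks too small to carry a map,
// a size field and a next pointer are dropped, and their size is returned as
// waste. For example, on a 32-bit build (kPointerSize == 4) a 24-byte block is
// linked into the exact-size bucket free_[24 >> kPointerSizeLog2], i.e.
// free_[6].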
1944int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1945#ifdef DEBUG
Leon Clarke4515c472010-02-03 11:58:03 +00001946 MemoryAllocator::ZapBlock(start, size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001947#endif
1948 FreeListNode* node = FreeListNode::FromAddress(start);
1949 node->set_size(size_in_bytes);
1950
1951 // We don't use the freelists in compacting mode. This makes it more like a
1952 // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1953 // collector.
1954 if (FLAG_always_compact) {
1955 return size_in_bytes;
1956 }
1957
1958 // Early return to drop too-small blocks on the floor (one or two word
1959 // blocks cannot hold a map pointer, a size field, and a pointer to the
1960 // next block in the free list).
1961 if (size_in_bytes < kMinBlockSize) {
1962 return size_in_bytes;
1963 }
1964
1965 // Insert other blocks at the head of an exact free list.
1966 int index = size_in_bytes >> kPointerSizeLog2;
1967 node->set_next(free_[index].head_node_);
1968 free_[index].head_node_ = node->address();
1969 available_ += size_in_bytes;
1970 needs_rebuild_ = true;
1971 return 0;
1972}
1973
1974
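// Allocate 'size_in_bytes' from the free list: try the exact-size bucket
// first, otherwise walk the size list for the smallest block that is large
// enough and split it, re-freeing the remainder. For example, on a 32-bit
// build a 16-byte request served from a 40-byte block returns the first
// 16 bytes and links the remaining 24 bytes back into free_[6].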
John Reck59135872010-11-02 12:39:01 -07001975MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001976 ASSERT(0 < size_in_bytes);
1977 ASSERT(size_in_bytes <= kMaxBlockSize);
1978 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1979
1980 if (needs_rebuild_) RebuildSizeList();
1981 int index = size_in_bytes >> kPointerSizeLog2;
1982 // Check for a perfect fit.
1983 if (free_[index].head_node_ != NULL) {
1984 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1985 // If this was the last block of its size, remove the size.
1986 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
1987 available_ -= size_in_bytes;
1988 *wasted_bytes = 0;
1989 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1990 return node;
1991 }
1992 // Search the size list for the best fit.
1993 int prev = finger_ < index ? finger_ : kHead;
1994 int cur = FindSize(index, &prev);
1995 ASSERT(index < cur);
1996 if (cur == kEnd) {
1997 // No large enough size in list.
1998 *wasted_bytes = 0;
Ben Murdochf87a2032010-10-22 12:50:53 +01001999 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002000 }
2001 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
2002 int rem = cur - index;
2003 int rem_bytes = rem << kPointerSizeLog2;
2004 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
2005 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
2006 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
2007 size_in_bytes);
2008 // Distinguish the cases prev < rem < cur and rem <= prev < cur
2009 // to avoid many redundant tests and calls to Insert/RemoveSize.
2010 if (prev < rem) {
2011 // Simple case: insert rem between prev and cur.
2012 finger_ = prev;
2013 free_[prev].next_size_ = rem;
2014 // If this was the last block of size cur, remove the size.
2015 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
2016 free_[rem].next_size_ = free_[cur].next_size_;
2017 } else {
2018 free_[rem].next_size_ = cur;
2019 }
2020 // Add the remainder block.
2021 rem_node->set_size(rem_bytes);
2022 rem_node->set_next(free_[rem].head_node_);
2023 free_[rem].head_node_ = rem_node->address();
2024 } else {
2025 // If this was the last block of size cur, remove the size.
2026 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
2027 finger_ = prev;
2028 free_[prev].next_size_ = free_[cur].next_size_;
2029 }
2030 if (rem_bytes < kMinBlockSize) {
2031 // Too-small remainder is wasted.
2032 rem_node->set_size(rem_bytes);
2033 available_ -= size_in_bytes + rem_bytes;
2034 *wasted_bytes = rem_bytes;
2035 return cur_node;
2036 }
2037 // Add the remainder block and, if needed, insert its size.
2038 rem_node->set_size(rem_bytes);
2039 rem_node->set_next(free_[rem].head_node_);
2040 free_[rem].head_node_ = rem_node->address();
2041 if (rem_node->next() == NULL) InsertSize(rem);
2042 }
2043 available_ -= size_in_bytes;
2044 *wasted_bytes = 0;
2045 return cur_node;
2046}
2047
2048
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002049void OldSpaceFreeList::MarkNodes() {
2050 for (int i = 0; i < kFreeListsLength; i++) {
2051 Address cur_addr = free_[i].head_node_;
2052 while (cur_addr != NULL) {
2053 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
2054 cur_addr = cur_node->next();
2055 cur_node->SetMark();
2056 }
2057 }
2058}
2059
2060
Steve Blocka7e24c12009-10-30 11:49:00 +00002061#ifdef DEBUG
2062bool OldSpaceFreeList::Contains(FreeListNode* node) {
2063 for (int i = 0; i < kFreeListsLength; i++) {
2064 Address cur_addr = free_[i].head_node_;
2065 while (cur_addr != NULL) {
2066 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
2067 if (cur_node == node) return true;
2068 cur_addr = cur_node->next();
2069 }
2070 }
2071 return false;
2072}
2073#endif
2074
2075
2076FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
2077 : owner_(owner), object_size_(object_size) {
2078 Reset();
2079}
2080
2081
2082void FixedSizeFreeList::Reset() {
2083 available_ = 0;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002084 head_ = tail_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00002085}
2086
2087
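// Return a fixed-size block to the free list. Blocks are appended at the tail
// here and taken from the head in Allocate(), so the list behaves as a FIFO
// queue of equally sized blocks.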
2088void FixedSizeFreeList::Free(Address start) {
2089#ifdef DEBUG
Leon Clarke4515c472010-02-03 11:58:03 +00002090 MemoryAllocator::ZapBlock(start, object_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002091#endif
Leon Clarkee46be812010-01-19 14:06:41 +00002092 // We only use the freelists with mark-sweep.
2093 ASSERT(!MarkCompactCollector::IsCompacting());
Steve Blocka7e24c12009-10-30 11:49:00 +00002094 FreeListNode* node = FreeListNode::FromAddress(start);
2095 node->set_size(object_size_);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002096 node->set_next(NULL);
2097 if (head_ == NULL) {
2098 tail_ = head_ = node->address();
2099 } else {
2100 FreeListNode::FromAddress(tail_)->set_next(node->address());
2101 tail_ = node->address();
2102 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002103 available_ += object_size_;
2104}
2105
2106
John Reck59135872010-11-02 12:39:01 -07002107MaybeObject* FixedSizeFreeList::Allocate() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002108 if (head_ == NULL) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002109 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002110 }
2111
2112 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
2113 FreeListNode* node = FreeListNode::FromAddress(head_);
2114 head_ = node->next();
2115 available_ -= object_size_;
2116 return node;
2117}
2118
2119
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002120void FixedSizeFreeList::MarkNodes() {
2121 Address cur_addr = head_;
2122 while (cur_addr != NULL && cur_addr != tail_) {
2123 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
2124 cur_addr = cur_node->next();
2125 cur_node->SetMark();
2126 }
2127}
2128
2129
Steve Blocka7e24c12009-10-30 11:49:00 +00002130// -----------------------------------------------------------------------------
2131// OldSpace implementation
2132
2133void OldSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01002134 // Call prepare of the super class.
2135 PagedSpace::PrepareForMarkCompact(will_compact);
2136
Steve Blocka7e24c12009-10-30 11:49:00 +00002137 if (will_compact) {
2138 // Reset relocation info. During a compacting collection, everything in
2139 // the space is considered 'available' and we will rediscover live data
2140 // and waste during the collection.
2141 MCResetRelocationInfo();
2142 ASSERT(Available() == Capacity());
2143 } else {
2144 // During a non-compacting collection, everything below the linear
2145 // allocation pointer is considered allocated (everything above is
2146 // available) and we will rediscover available and wasted bytes during
2147 // the collection.
2148 accounting_stats_.AllocateBytes(free_list_.available());
2149 accounting_stats_.FillWastedBytes(Waste());
2150 }
2151
2152 // Clear the free list before a full GC---it will be rebuilt afterward.
2153 free_list_.Reset();
2154}
2155
2156
2157void OldSpace::MCCommitRelocationInfo() {
2158 // Update fast allocation info.
2159 allocation_info_.top = mc_forwarding_info_.top;
2160 allocation_info_.limit = mc_forwarding_info_.limit;
2161 ASSERT(allocation_info_.VerifyPagedAllocation());
2162
2163 // The space is compacted and we haven't yet built free lists or
2164 // wasted any space.
2165 ASSERT(Waste() == 0);
2166 ASSERT(AvailableFree() == 0);
2167
2168 // Build the free list for the space.
2169 int computed_size = 0;
2170 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2171 while (it.has_next()) {
2172 Page* p = it.next();
2173 // Space below the relocation pointer is allocated.
Steve Blockd0582a62009-12-15 09:54:21 +00002174 computed_size +=
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002175 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00002176 if (it.has_next()) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002177 // Free the space at the top of the page.
Steve Blockd0582a62009-12-15 09:54:21 +00002178 int extra_size =
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002179 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
Steve Blocka7e24c12009-10-30 11:49:00 +00002180 if (extra_size > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002181 int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
2182 extra_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002183 // The bytes we have just "freed" to add to the free list were
2184 // already accounted as available.
2185 accounting_stats_.WasteBytes(wasted_bytes);
2186 }
2187 }
2188 }
2189
2190 // Make sure the computed size - based on the used portion of the pages in
2191 // use - matches the size obtained while computing forwarding addresses.
2192 ASSERT(computed_size == Size());
2193}
2194
2195
Leon Clarkee46be812010-01-19 14:06:41 +00002196bool NewSpace::ReserveSpace(int bytes) {
2197 // We can't reliably unpack a partial snapshot that needs more new space
2198 // than the minimum NewSpace size.
2199 ASSERT(bytes <= InitialCapacity());
2200 Address limit = allocation_info_.limit;
2201 Address top = allocation_info_.top;
2202 return limit - top >= bytes;
2203}
2204
2205
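// Move the chain of pages after 'prev' (or from the start of the list when
// 'prev' is NULL) up to and including 'last' to the end of the space's page
// list, resetting their allocation watermarks and region marks so they can be
// reused as empty pages. Afterwards the page list may no longer follow the
// order of pages inside their chunks.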
Steve Block6ded16b2010-05-10 14:33:55 +01002206void PagedSpace::FreePages(Page* prev, Page* last) {
2207 if (last == AllocationTopPage()) {
2208 // Pages are already at the end of used pages.
2209 return;
2210 }
2211
2212 Page* first = NULL;
2213
2214 // Remove pages from the list.
2215 if (prev == NULL) {
2216 first = first_page_;
2217 first_page_ = last->next_page();
2218 } else {
2219 first = prev->next_page();
2220 MemoryAllocator::SetNextPage(prev, last->next_page());
2221 }
2222
2223 // Attach it after the last page.
2224 MemoryAllocator::SetNextPage(last_page_, first);
2225 last_page_ = last;
2226 MemoryAllocator::SetNextPage(last, NULL);
2227
2228 // Clean them up.
2229 do {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002230 first->InvalidateWatermark(true);
2231 first->SetAllocationWatermark(first->ObjectAreaStart());
2232 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
2233 first->SetRegionMarks(Page::kAllRegionsCleanMarks);
Steve Block6ded16b2010-05-10 14:33:55 +01002234 first = first->next_page();
2235 } while (first != NULL);
2236
2237 // Order of pages in this space might no longer be consistent with
2238 // order of pages in chunks.
2239 page_list_is_chunk_ordered_ = false;
2240}
2241
2242
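// Restore the page list to the order in which pages appear inside their
// chunks (used when preparing for a compacting collection). Pages are first
// tagged with whether they were in use, then relinked; unused pages that end
// up between used ones are either returned to the free list or filled with a
// filler object, depending on 'deallocate_blocks'.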
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002243void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
2244 const bool add_to_freelist = true;
2245
2246 // Mark used and unused pages to properly fill unused pages
2247 // after reordering.
2248 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
2249 Page* last_in_use = AllocationTopPage();
2250 bool in_use = true;
2251
2252 while (all_pages_iterator.has_next()) {
2253 Page* p = all_pages_iterator.next();
2254 p->SetWasInUseBeforeMC(in_use);
2255 if (p == last_in_use) {
2256 // We passed the page containing the allocation top. All subsequent
2257 // pages are unused.
2258 in_use = false;
2259 }
2260 }
2261
2262 if (page_list_is_chunk_ordered_) return;
2263
2264 Page* new_last_in_use = Page::FromAddress(NULL);
2265 MemoryAllocator::RelinkPageListInChunkOrder(this,
2266 &first_page_,
2267 &last_page_,
2268 &new_last_in_use);
2269 ASSERT(new_last_in_use->is_valid());
2270
2271 if (new_last_in_use != last_in_use) {
2272 // Current allocation top points to a page which is now in the middle
2273 // of the page list. We should move the allocation top forward to the new last
2274 // used page so various object iterators will continue to work properly.
2275 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2276 last_in_use->AllocationTop());
2277
2278 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2279 if (size_in_bytes > 0) {
2280 Address start = last_in_use->AllocationTop();
2281 if (deallocate_blocks) {
2282 accounting_stats_.AllocateBytes(size_in_bytes);
2283 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2284 } else {
2285 Heap::CreateFillerObjectAt(start, size_in_bytes);
2286 }
2287 }
2288
2289 // New last in use page was in the middle of the list before
2290 // sorting, so it is full.
2291 SetTop(new_last_in_use->AllocationTop());
2292
2293 ASSERT(AllocationTopPage() == new_last_in_use);
2294 ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2295 }
2296
2297 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2298 while (pages_in_use_iterator.has_next()) {
2299 Page* p = pages_in_use_iterator.next();
2300 if (!p->WasInUseBeforeMC()) {
2301 // Empty page is in the middle of a sequence of used pages.
2302 // Allocate it as a whole and deallocate immediately.
2303 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2304 p->ObjectAreaStart());
2305
2306 p->SetAllocationWatermark(p->ObjectAreaStart());
2307 Address start = p->ObjectAreaStart();
2308 if (deallocate_blocks) {
2309 accounting_stats_.AllocateBytes(size_in_bytes);
2310 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2311 } else {
2312 Heap::CreateFillerObjectAt(start, size_in_bytes);
2313 }
2314 }
2315 }
2316
2317 page_list_is_chunk_ordered_ = true;
2318}
2319
2320
Steve Block6ded16b2010-05-10 14:33:55 +01002321void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2322 if (will_compact) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002323 RelinkPageListInChunkOrder(false);
Steve Block6ded16b2010-05-10 14:33:55 +01002324 }
2325}
2326
2327
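// Ensure that 'bytes' of allocation space are available after the current
// allocation top: the remainder of the current page goes on the free list and
// enough following pages are reserved, expanding the space if needed. This is
// used, for example, when reserving room to unpack a snapshot.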
Leon Clarkee46be812010-01-19 14:06:41 +00002328bool PagedSpace::ReserveSpace(int bytes) {
2329 Address limit = allocation_info_.limit;
2330 Address top = allocation_info_.top;
2331 if (limit - top >= bytes) return true;
2332
2333 // There wasn't enough space in the current page. Lets put the rest
2334 // of the page on the free list and start a fresh page.
2335 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2336
2337 Page* reserved_page = TopPageOf(allocation_info_);
2338 int bytes_left_to_reserve = bytes;
2339 while (bytes_left_to_reserve > 0) {
2340 if (!reserved_page->next_page()->is_valid()) {
2341 if (Heap::OldGenerationAllocationLimitReached()) return false;
2342 Expand(reserved_page);
2343 }
2344 bytes_left_to_reserve -= Page::kPageSize;
2345 reserved_page = reserved_page->next_page();
2346 if (!reserved_page->is_valid()) return false;
2347 }
2348 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002349 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
Leon Clarkee46be812010-01-19 14:06:41 +00002350 SetAllocationInfo(&allocation_info_,
2351 TopPageOf(allocation_info_)->next_page());
2352 return true;
2353}
2354
2355
2356// You have to call this last, since the implementation from PagedSpace
2357// doesn't know that memory was 'promised' to large object space.
2358bool LargeObjectSpace::ReserveSpace(int bytes) {
2359 return Heap::OldGenerationSpaceAvailable() >= bytes;
2360}
2361
2362
Steve Blocka7e24c12009-10-30 11:49:00 +00002363// Slow case for normal allocation. Try in order: (1) allocate in the next
2364// page in the space, (2) allocate off the space's free list, (3) expand the
2365// space, (4) fail.
2366HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2367 // Linear allocation in this space has failed. If there is another page
2368 // in the space, move to that page and allocate there. This allocation
2369 // should succeed (size_in_bytes should not be greater than a page's
2370 // object area size).
2371 Page* current_page = TopPageOf(allocation_info_);
2372 if (current_page->next_page()->is_valid()) {
2373 return AllocateInNextPage(current_page, size_in_bytes);
2374 }
2375
Steve Blockd0582a62009-12-15 09:54:21 +00002376 // There is no next page in this space. Try free list allocation unless that
2377 // is currently forbidden.
2378 if (!Heap::linear_allocation()) {
2379 int wasted_bytes;
John Reck59135872010-11-02 12:39:01 -07002380 Object* result;
2381 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
Steve Blockd0582a62009-12-15 09:54:21 +00002382 accounting_stats_.WasteBytes(wasted_bytes);
John Reck59135872010-11-02 12:39:01 -07002383 if (maybe->ToObject(&result)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002384 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002385
2386 HeapObject* obj = HeapObject::cast(result);
2387 Page* p = Page::FromAddress(obj->address());
2388
2389 if (obj->address() >= p->AllocationWatermark()) {
2390 // There should be no hole between the allocation watermark
2391 // and allocated object address.
2392 // Memory above the allocation watermark was not swept and
2393 // might contain garbage pointers to new space.
2394 ASSERT(obj->address() == p->AllocationWatermark());
2395 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2396 }
2397
2398 return obj;
Steve Blockd0582a62009-12-15 09:54:21 +00002399 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002400 }
2401
2402 // Free list allocation failed and there is no next page. Fail if we have
2403 // hit the old generation size limit that should cause a garbage
2404 // collection.
2405 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2406 return NULL;
2407 }
2408
2409 // Try to expand the space and allocate in the new next page.
2410 ASSERT(!current_page->next_page()->is_valid());
2411 if (Expand(current_page)) {
2412 return AllocateInNextPage(current_page, size_in_bytes);
2413 }
2414
2415 // Finally, fail.
2416 return NULL;
2417}
2418
2419
Leon Clarkee46be812010-01-19 14:06:41 +00002420void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002421 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blockd0582a62009-12-15 09:54:21 +00002422 int free_size =
2423 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002424 if (free_size > 0) {
2425 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2426 accounting_stats_.WasteBytes(wasted_bytes);
2427 }
Leon Clarkee46be812010-01-19 14:06:41 +00002428}
2429
2430
2431void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002432 current_page->SetAllocationWatermark(allocation_info_.top);
Leon Clarkee46be812010-01-19 14:06:41 +00002433 int free_size =
2434 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2435 // In the fixed space free list all the free list items have the right size.
2436 // We use up the rest of the page while preserving this invariant.
2437 while (free_size >= object_size_in_bytes_) {
2438 free_list_.Free(allocation_info_.top);
2439 allocation_info_.top += object_size_in_bytes_;
2440 free_size -= object_size_in_bytes_;
2441 accounting_stats_.WasteBytes(object_size_in_bytes_);
2442 }
2443}
2444
2445
2446// Add the block at the top of the page to the space's free list, set the
2447// allocation info to the next page (there is assumed to be one), and allocate
2448// linearly there.
2449HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2450 int size_in_bytes) {
2451 ASSERT(current_page->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002452 Page* next_page = current_page->next_page();
2453 next_page->ClearGCFields();
Leon Clarkee46be812010-01-19 14:06:41 +00002454 PutRestOfCurrentPageOnFreeList(current_page);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002455 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002456 return AllocateLinearly(&allocation_info_, size_in_bytes);
2457}
2458
2459
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002460void OldSpace::DeallocateBlock(Address start,
2461 int size_in_bytes,
2462 bool add_to_freelist) {
2463 Free(start, size_in_bytes, add_to_freelist);
2464}
2465
2466
Steve Blocka7e24c12009-10-30 11:49:00 +00002467#ifdef DEBUG
2468struct CommentStatistic {
2469 const char* comment;
2470 int size;
2471 int count;
2472 void Clear() {
2473 comment = NULL;
2474 size = 0;
2475 count = 0;
2476 }
2477};
2478
2479
2480// Must be small, since a linear scan is used for lookup.
2481const int kMaxComments = 64;
2482static CommentStatistic comments_statistics[kMaxComments+1];
2483
2484
2485void PagedSpace::ReportCodeStatistics() {
2486 ReportCodeKindStatistics();
2487 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2488 "count (average)\"):\n");
2489 for (int i = 0; i <= kMaxComments; i++) {
2490 const CommentStatistic& cs = comments_statistics[i];
2491 if (cs.size > 0) {
2492 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2493 cs.size/cs.count);
2494 }
2495 }
2496 PrintF("\n");
2497}
2498
2499
2500void PagedSpace::ResetCodeStatistics() {
2501 ClearCodeKindStatistics();
2502 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
2503 comments_statistics[kMaxComments].comment = "Unknown";
2504 comments_statistics[kMaxComments].size = 0;
2505 comments_statistics[kMaxComments].count = 0;
2506}
2507
2508
2509// Adds a comment to the 'comments_statistics' table. Performance is OK as
2510// long as 'kMaxComments' is small.
2511static void EnterComment(const char* comment, int delta) {
2512 // Do not count empty comments
2513 if (delta <= 0) return;
2514 CommentStatistic* cs = &comments_statistics[kMaxComments];
2515 // Search for a free or matching entry in 'comments_statistics': 'cs'
2516 // points to result.
2517 for (int i = 0; i < kMaxComments; i++) {
2518 if (comments_statistics[i].comment == NULL) {
2519 cs = &comments_statistics[i];
2520 cs->comment = comment;
2521 break;
2522 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2523 cs = &comments_statistics[i];
2524 break;
2525 }
2526 }
2527 // Update entry for 'comment'
2528 cs->size += delta;
2529 cs->count += 1;
2530}
2531
2532
2533// Call for each nested comment start (start marked with '[ xxx', end marked
2534// with ']'). RelocIterator 'it' must point to a comment reloc info.
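// For a nested structure such as "[ Outer ... [ Inner ... ] ... ]" the bytes
// covered by the inner comment are attributed to "Inner" only; the outer entry
// is charged just the code that is not covered by any nested comment.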
2535static void CollectCommentStatistics(RelocIterator* it) {
2536 ASSERT(!it->done());
2537 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2538 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2539 if (tmp[0] != '[') {
2540 // Not a nested comment; skip
2541 return;
2542 }
2543
2544 // Search for end of nested comment or a new nested comment
2545 const char* const comment_txt =
2546 reinterpret_cast<const char*>(it->rinfo()->data());
2547 const byte* prev_pc = it->rinfo()->pc();
2548 int flat_delta = 0;
2549 it->next();
2550 while (true) {
2551 // All nested comments must be terminated properly, and therefore exit
2552 // from the loop.
2553 ASSERT(!it->done());
2554 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2555 const char* const txt =
2556 reinterpret_cast<const char*>(it->rinfo()->data());
Steve Blockd0582a62009-12-15 09:54:21 +00002557 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002558 if (txt[0] == ']') break; // End of nested comment
2559 // A new comment
2560 CollectCommentStatistics(it);
2561 // Skip code that was covered with previous comment
2562 prev_pc = it->rinfo()->pc();
2563 }
2564 it->next();
2565 }
2566 EnterComment(comment_txt, flat_delta);
2567}
2568
2569
2570// Collects code size statistics:
2571// - by code kind
2572// - by code comment
2573void PagedSpace::CollectCodeStatistics() {
2574 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002575 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002576 if (obj->IsCode()) {
2577 Code* code = Code::cast(obj);
2578 code_kind_statistics[code->kind()] += code->Size();
2579 RelocIterator it(code);
2580 int delta = 0;
2581 const byte* prev_pc = code->instruction_start();
2582 while (!it.done()) {
2583 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
Steve Blockd0582a62009-12-15 09:54:21 +00002584 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002585 CollectCommentStatistics(&it);
2586 prev_pc = it.rinfo()->pc();
2587 }
2588 it.next();
2589 }
2590
2591 ASSERT(code->instruction_start() <= prev_pc &&
Leon Clarkeac952652010-07-15 11:15:24 +01002592 prev_pc <= code->instruction_end());
2593 delta += static_cast<int>(code->instruction_end() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002594 EnterComment("NoComment", delta);
2595 }
2596 }
2597}
2598
2599
2600void OldSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002601 int pct = static_cast<int>(Available() * 100 / Capacity());
2602 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2603 ", waste: %" V8_PTR_PREFIX "d"
2604 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002605 Capacity(), Waste(), Available(), pct);
2606
Steve Blocka7e24c12009-10-30 11:49:00 +00002607 ClearHistograms();
2608 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002609 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2610 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002611 ReportHistogram(true);
2612}
Steve Blocka7e24c12009-10-30 11:49:00 +00002613#endif
2614
2615// -----------------------------------------------------------------------------
2616// FixedSpace implementation
2617
2618void FixedSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01002619 // Call prepare of the super class.
2620 PagedSpace::PrepareForMarkCompact(will_compact);
2621
Steve Blocka7e24c12009-10-30 11:49:00 +00002622 if (will_compact) {
2623 // Reset relocation info.
2624 MCResetRelocationInfo();
2625
2626 // During a compacting collection, everything in the space is considered
2627 // 'available' (set by the call to MCResetRelocationInfo) and we will
2628 // rediscover live and wasted bytes during the collection.
2629 ASSERT(Available() == Capacity());
2630 } else {
2631 // During a non-compacting collection, everything below the linear
2632 // allocation pointer except wasted top-of-page blocks is considered
2633 // allocated and we will rediscover available bytes during the
2634 // collection.
2635 accounting_stats_.AllocateBytes(free_list_.available());
2636 }
2637
2638 // Clear the free list before a full GC---it will be rebuilt afterward.
2639 free_list_.Reset();
2640}
2641
2642
2643void FixedSpace::MCCommitRelocationInfo() {
2644 // Update fast allocation info.
2645 allocation_info_.top = mc_forwarding_info_.top;
2646 allocation_info_.limit = mc_forwarding_info_.limit;
2647 ASSERT(allocation_info_.VerifyPagedAllocation());
2648
2649 // The space is compacted and we haven't yet wasted any space.
2650 ASSERT(Waste() == 0);
2651
2652 // Update allocation_top of each page in use and compute waste.
2653 int computed_size = 0;
2654 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2655 while (it.has_next()) {
2656 Page* page = it.next();
2657 Address page_top = page->AllocationTop();
Steve Blockd0582a62009-12-15 09:54:21 +00002658 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00002659 if (it.has_next()) {
Steve Blockd0582a62009-12-15 09:54:21 +00002660 accounting_stats_.WasteBytes(
2661 static_cast<int>(page->ObjectAreaEnd() - page_top));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002662 page->SetAllocationWatermark(page_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002663 }
2664 }
2665
2666 // Make sure the computed size - based on the used portion of the
2667 // pages in use - matches the size we adjust during allocation.
2668 ASSERT(computed_size == Size());
2669}
2670
2671
2672// Slow case for normal allocation. Try in order: (1) allocate in the next
2673// page in the space, (2) allocate off the space's free list, (3) expand the
2674// space, (4) fail.
2675HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2676 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2677 // Linear allocation in this space has failed. If there is another page
2678 // in the space, move to that page and allocate there. This allocation
2679 // should succeed.
2680 Page* current_page = TopPageOf(allocation_info_);
2681 if (current_page->next_page()->is_valid()) {
2682 return AllocateInNextPage(current_page, size_in_bytes);
2683 }
2684
Steve Blockd0582a62009-12-15 09:54:21 +00002685 // There is no next page in this space. Try free list allocation unless
2686 // that is currently forbidden. The fixed space free list implicitly assumes
2687 // that all free blocks are of the fixed size.
2688 if (!Heap::linear_allocation()) {
John Reck59135872010-11-02 12:39:01 -07002689 Object* result;
2690 MaybeObject* maybe = free_list_.Allocate();
2691 if (maybe->ToObject(&result)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002692 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002693 HeapObject* obj = HeapObject::cast(result);
2694 Page* p = Page::FromAddress(obj->address());
2695
2696 if (obj->address() >= p->AllocationWatermark()) {
2697 // There should be no hole between the allocation watermark
2698 // and allocated object address.
2699 // Memory above the allocation watermark was not swept and
2700 // might contain garbage pointers to new space.
2701 ASSERT(obj->address() == p->AllocationWatermark());
2702 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2703 }
2704
2705 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00002706 }
2707 }
2708
2709 // Free list allocation failed and there is no next page. Fail if we have
2710 // hit the old generation size limit that should cause a garbage
2711 // collection.
2712 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2713 return NULL;
2714 }
2715
2716 // Try to expand the space and allocate in the new next page.
2717 ASSERT(!current_page->next_page()->is_valid());
2718 if (Expand(current_page)) {
2719 return AllocateInNextPage(current_page, size_in_bytes);
2720 }
2721
2722 // Finally, fail.
2723 return NULL;
2724}
2725
2726
2727// Move to the next page (there is assumed to be one) and allocate there.
2728// The top of page block is always wasted, because it is too small to hold a
2729// map.
2730HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2731 int size_in_bytes) {
2732 ASSERT(current_page->next_page()->is_valid());
Steve Block6ded16b2010-05-10 14:33:55 +01002733 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
Steve Blocka7e24c12009-10-30 11:49:00 +00002734 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002735 Page* next_page = current_page->next_page();
2736 next_page->ClearGCFields();
2737 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002738 accounting_stats_.WasteBytes(page_extra_);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002739 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002740 return AllocateLinearly(&allocation_info_, size_in_bytes);
2741}
2742
2743
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002744void FixedSpace::DeallocateBlock(Address start,
2745 int size_in_bytes,
2746 bool add_to_freelist) {
2747 // Free-list elements in fixed space are assumed to have a fixed size.
2748 // We break the free block into chunks and add them to the free list
2749 // individually.
2750 int size = object_size_in_bytes();
2751 ASSERT(size_in_bytes % size == 0);
2752 Address end = start + size_in_bytes;
2753 for (Address a = start; a < end; a += size) {
2754 Free(a, add_to_freelist);
2755 }
2756}
2757
2758
Steve Blocka7e24c12009-10-30 11:49:00 +00002759#ifdef DEBUG
2760void FixedSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002761 int pct = static_cast<int>(Available() * 100 / Capacity());
2762 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2763 ", waste: %" V8_PTR_PREFIX "d"
2764 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002765 Capacity(), Waste(), Available(), pct);
2766
Steve Blocka7e24c12009-10-30 11:49:00 +00002767 ClearHistograms();
2768 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002769 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2770 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002771 ReportHistogram(false);
2772}
Steve Blocka7e24c12009-10-30 11:49:00 +00002773#endif
2774
2775
2776// -----------------------------------------------------------------------------
2777// MapSpace implementation
2778
2779void MapSpace::PrepareForMarkCompact(bool will_compact) {
2780 // Call prepare of the super class.
2781 FixedSpace::PrepareForMarkCompact(will_compact);
2782
2783 if (will_compact) {
2784 // Initialize map index entry.
2785 int page_count = 0;
2786 PageIterator it(this, PageIterator::ALL_PAGES);
2787 while (it.has_next()) {
2788 ASSERT_MAP_PAGE_INDEX(page_count);
2789
2790 Page* p = it.next();
2791 ASSERT(p->mc_page_index == page_count);
2792
2793 page_addresses_[page_count++] = p->address();
2794 }
2795 }
2796}
2797
2798
2799#ifdef DEBUG
2800void MapSpace::VerifyObject(HeapObject* object) {
2801 // The object should be a map or a free-list node.
2802 ASSERT(object->IsMap() || object->IsByteArray());
2803}
2804#endif
2805
2806
2807// -----------------------------------------------------------------------------
2808// GlobalPropertyCellSpace implementation
2809
2810#ifdef DEBUG
2811void CellSpace::VerifyObject(HeapObject* object) {
2812 // The object should be a global object property cell or a free-list node.
2813 ASSERT(object->IsJSGlobalPropertyCell() ||
2814 object->map() == Heap::two_pointer_filler_map());
2815}
2816#endif
2817
2818
2819// -----------------------------------------------------------------------------
2820// LargeObjectIterator
2821
2822LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2823 current_ = space->first_chunk_;
2824 size_func_ = NULL;
2825}
2826
2827
2828LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2829 HeapObjectCallback size_func) {
2830 current_ = space->first_chunk_;
2831 size_func_ = size_func;
2832}
2833
2834
2835HeapObject* LargeObjectIterator::next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002836 if (current_ == NULL) return NULL;
2837
Steve Blocka7e24c12009-10-30 11:49:00 +00002838 HeapObject* object = current_->GetObject();
2839 current_ = current_->next();
2840 return object;
2841}
2842
2843
2844// -----------------------------------------------------------------------------
2845// LargeObjectChunk
2846
2847LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
Steve Blocka7e24c12009-10-30 11:49:00 +00002848 Executability executable) {
2849 size_t requested = ChunkSizeFor(size_in_bytes);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002850 size_t size;
2851 void* mem = MemoryAllocator::AllocateRawMemory(requested, &size, executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002852 if (mem == NULL) return NULL;
Ben Murdochb0fe1622011-05-05 13:52:32 +01002853
2854 // The start of the chunk may be overlaid with a page, so we have to
2855 // make sure that the page flags fit in the size field.
2856 ASSERT((size & Page::kPageFlagMask) == 0);
2857
2858 LOG(NewEvent("LargeObjectChunk", mem, size));
2859 if (size < requested) {
2860 MemoryAllocator::FreeRawMemory(mem, size, executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002861 LOG(DeleteEvent("LargeObjectChunk", mem));
2862 return NULL;
2863 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002864
2865 ObjectSpace space = (executable == EXECUTABLE)
2866 ? kObjectSpaceCodeSpace
2867 : kObjectSpaceLoSpace;
2868 MemoryAllocator::PerformAllocationCallback(
2869 space, kAllocationActionAllocate, size);
2870
2871 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2872 chunk->size_ = size;
2873 return chunk;
Steve Blocka7e24c12009-10-30 11:49:00 +00002874}
2875
2876
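// Compute the amount of raw memory to request for a large object of
// 'size_in_bytes': enough slack is added so that after the chunk start is
// rounded up to a page boundary there is still room for the page header
// (Page::kObjectStartOffset) plus the object itself, assuming the OS hands out
// memory aligned only to OS::AllocateAlignment().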
2877int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
Steve Blockd0582a62009-12-15 09:54:21 +00002878 int os_alignment = static_cast<int>(OS::AllocateAlignment());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002879 if (os_alignment < Page::kPageSize) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002880 size_in_bytes += (Page::kPageSize - os_alignment);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002881 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002882 return size_in_bytes + Page::kObjectStartOffset;
2883}
2884
2885// -----------------------------------------------------------------------------
2886// LargeObjectSpace
2887
2888LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
2889 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2890 first_chunk_(NULL),
2891 size_(0),
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002892 page_count_(0),
2893 objects_size_(0) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002894
2895
2896bool LargeObjectSpace::Setup() {
2897 first_chunk_ = NULL;
2898 size_ = 0;
2899 page_count_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002900 objects_size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002901 return true;
2902}
2903
2904
2905void LargeObjectSpace::TearDown() {
2906 while (first_chunk_ != NULL) {
2907 LargeObjectChunk* chunk = first_chunk_;
2908 first_chunk_ = first_chunk_->next();
2909 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
Steve Block791712a2010-08-27 10:21:07 +01002910 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2911 Executability executable =
2912 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Iain Merrick9ac36c92010-09-13 15:29:50 +01002913 ObjectSpace space = kObjectSpaceLoSpace;
2914 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2915 size_t size = chunk->size();
2916 MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
2917 MemoryAllocator::PerformAllocationCallback(
2918 space, kAllocationActionFree, size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002919 }
2920
2921 size_ = 0;
2922 page_count_ = 0;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002923 objects_size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002924}
2925
2926
2927#ifdef ENABLE_HEAP_PROTECTION
2928
2929void LargeObjectSpace::Protect() {
2930 LargeObjectChunk* chunk = first_chunk_;
2931 while (chunk != NULL) {
2932 MemoryAllocator::Protect(chunk->address(), chunk->size());
2933 chunk = chunk->next();
2934 }
2935}
2936
2937
2938void LargeObjectSpace::Unprotect() {
2939 LargeObjectChunk* chunk = first_chunk_;
2940 while (chunk != NULL) {
2941 bool is_code = chunk->GetObject()->IsCode();
2942 MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
2943 is_code ? EXECUTABLE : NOT_EXECUTABLE);
2944 chunk = chunk->next();
2945 }
2946}
2947
2948#endif
2949
2950
MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                                   int object_size,
                                                   Executability executable) {
  ASSERT(0 < object_size && object_size <= requested_size);

  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(identity());
  }

  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
  if (chunk == NULL) {
    return Failure::RetryAfterGC(identity());
  }

  size_ += static_cast<int>(chunk->size());
  objects_size_ += requested_size;
  page_count_++;
  chunk->set_next(first_chunk_);
  first_chunk_ = chunk;

  // Initialize page header.
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  Address object_address = page->ObjectAreaStart();

  // Clear the low order bit of the second word in the page to flag it as a
  // large object page.  If the chunk_size happened to be written there, its
  // low order bit should already be clear.
  page->SetIsLargeObjectPage(true);
  page->SetIsPageExecutable(executable);
  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
  return HeapObject::FromAddress(object_address);
}
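
// NOTE (editorial, illustrative only): the RoundUp() above is needed because a
// LargeObjectChunk is not guaranteed to begin on a V8 page boundary, so the
// Page that carries the large-object header bits is the first page-aligned
// address inside the chunk. A worked example with made-up addresses, assuming
// an 8 KB (0x2000-byte) Page::kPageSize:
//
//   chunk->address()                           == 0x48603f30
//   RoundUp(chunk->address(), Page::kPageSize) == 0x48604000
//   Page::FromAddress(0x48604000)              // the page holding the flags
//   page->ObjectAreaStart()                    // where the object itself lives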


MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             EXECUTABLE);
}


MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}
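
// NOTE (editorial, illustrative only): the allocators above hand back a
// MaybeObject* rather than a raw HeapObject*, so call sites must unwrap the
// result and propagate retry-after-GC failures. A hypothetical caller-side
// sketch using the usual ToObject() protocol:
//
//   Object* result;
//   { MaybeObject* maybe = Heap::lo_space()->AllocateRawFixedArray(size);
//     if (!maybe->ToObject(&result)) return maybe;  // e.g. RetryAfterGC.
//   }
//   // Only now is it safe to treat |result| as a live object.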


// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= a && a < chunk_address + chunk->size()) {
      return chunk->GetObject();
    }
  }
  return Failure::Exception();
}


LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
  // TODO(853): Change this implementation to only find executable
  // chunks and use some kind of hash-based approach to speed it up.
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
      return chunk;
    }
  }
  return NULL;
}


void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
  LargeObjectIterator it(this);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    if (object->IsFixedArray()) {
      Page* page = Page::FromAddress(object->address());
      uint32_t marks = page->GetRegionMarks();
      uint32_t newmarks = Page::kAllRegionsCleanMarks;

      if (marks != Page::kAllRegionsCleanMarks) {
        // For a large page a single dirty mark corresponds to several
        // regions (modulo 32).  So we treat a large page as a sequence of
        // normal pages of size Page::kPageSize that share the same dirty
        // marks, and iterate the dirty regions of each of those pages in turn.
        Address start = object->address();
        Address end = page->ObjectAreaEnd();
        Address object_end = start + object->Size();

        // Iterate regions of the first normal page covering the object.
        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
        newmarks |=
            Heap::IterateDirtyRegions(marks >> first_region_number,
                                      start,
                                      end,
                                      &Heap::IteratePointersInDirtyRegion,
                                      copy_object) << first_region_number;

        start = end;
        end = start + Page::kPageSize;
        while (end <= object_end) {
          // Iterate the next 32 regions.
          newmarks |=
              Heap::IterateDirtyRegions(marks,
                                        start,
                                        end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object);
          start = end;
          end = start + Page::kPageSize;
        }

        if (start != object_end) {
          // Iterate the last piece of the object, which is shorter than
          // Page::kPageSize.
          newmarks |=
              Heap::IterateDirtyRegions(marks,
                                        start,
                                        object_end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object);
        }

        page->SetRegionMarks(newmarks);
      }
    }
  }
}
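
// NOTE (editorial, illustrative only): a worked example of the mark reuse
// above, assuming the 8 KB Page::kPageSize and 32 region mark bits per page
// used by this code (one mark bit per 256 bytes). For a FixedArray spanning
// several 8 KB windows, the large page's single 32-bit mark word is applied
// to each window in turn:
//
//   first window: marks >> first_region_number  (object may start mid-page)
//   full middle windows: marks, unshifted       (whole Page::kPageSize steps)
//   final partial window, if any: marks, scanned only up to object_end
//
// The bits returned by Heap::IterateDirtyRegions() for each window are OR-ed
// into newmarks and written back once with SetRegionMarks().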


void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      Page* page = Page::FromAddress(RoundUp(current->address(),
                                     Page::kPageSize));
      Executability executable =
          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      MarkCompactCollector::ReportDeleteIfNeeded(object);
      LiveObjectList::ProcessNonLive(object);

      size_ -= static_cast<int>(chunk_size);
      objects_size_ -= object->Size();
      page_count_--;
      ObjectSpace space = kObjectSpaceLoSpace;
      if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
      MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
                                                 chunk_size);
      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}


bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  if (Heap::new_space()->Contains(address)) {
    return false;
  }
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}


#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather than using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRegionDirty.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(Heap::Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (Heap::InNewSpace(element_object)) {
            Address array_addr = object->address();
            Address element_addr = array_addr + FixedArray::kHeaderSize +
                j * kPointerSize;

            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
          }
        }
      }
    }
  }
}


void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    obj->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}
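
// NOTE (editorial, illustrative only): sample of the report printed above,
// with made-up figures:
//
//     size: 4259840
//     number of objects 3, size of objects 3145760
//
// followed by the object histogram whenever at least one object is present.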


void LargeObjectSpace::CollectCodeStatistics() {
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal