// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())

intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


HeapObject* HeapObjectIterator::FromNextPage() {
  if (cur_addr_ == end_addr_) return NULL;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  if (cur_addr_ == end_addr_) return NULL;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return FromCurrentPage();
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif
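
// Illustrative usage sketch (assumes the inline accessor
// HeapObjectIterator::next(), declared elsewhere, advances via
// FromCurrentPage()/FromNextPage() and returns NULL at the end, as
// FromNextPage() above does; `space` is a hypothetical PagedSpace*):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }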


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}
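
// Illustrative usage sketch (mirrors how PageIterator is driven later in this
// file, e.g. in PagedSpace::MarkAllPagesClean; `space` is a hypothetical
// PagedSpace*):
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... per-page processing ...
//   }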


// -----------------------------------------------------------------------------
// CodeRange

List<CodeRange::FreeBlock> CodeRange::free_list_(0);
List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
int CodeRange::current_allocation_block_index_ = 0;
VirtualMemory* CodeRange::code_range_ = NULL;


bool CodeRange::Setup(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(NewEvent("CodeRange", code_range_->address(), requested));
  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}
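
// Worked example of the merge above (illustrative, hypothetical addresses):
// free blocks {start: 0x10000, size: 0x8000} and {start: 0x18000, size:
// 0x8000} are adjacent after sorting (0x10000 + 0x8000 == 0x18000), so they
// coalesce into one allocation-list entry {start: 0x10000, size: 0x10000}
// before the large-enough-block search is retried.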


void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}
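
// Illustrative call sketch (hypothetical request size; assumes 8KB pages, so
// RoundUp(40000, Page::kPageSize) == 40960):
//
//   size_t committed = 0;
//   void* base = CodeRange::AllocateRawMemory(40000, &committed);
//   // On success base != NULL and committed holds the page-rounded size,
//   // taken from the head of the current allocation block.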


void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//
intptr_t MemoryAllocator::capacity_ = 0;
intptr_t MemoryAllocator::capacity_executable_ = 0;
intptr_t MemoryAllocator::size_ = 0;
intptr_t MemoryAllocator::size_executable_ = 0;

List<MemoryAllocator::MemoryAllocationCallbackRegistration>
    MemoryAllocator::memory_allocation_callbacks_;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  // Over-estimate the size of the chunks_ array.  This assumes that old
  // space always expands by a whole chunk (kChunkSize), except possibly the
  // last expansion.
  //
  // Due to alignment, allocated space might be one page less than the
  // required number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ =
      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  size_executable_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}
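
// Worked example of the formula above (illustrative; assumes kChunkSize ==
// 128KB and Page::kPageSize == 8KB, which are not quoted from the headers):
// a 64MB capacity gives 64MB / (128KB - 8KB) == 546 after truncation, plus
// the 5 reserved chunk ids (two semispaces, map space, old space, and code
// space), so max_nof_chunks_ == 551.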


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  capacity_executable_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
    return NULL;
  }

  void* mem;
  if (executable == EXECUTABLE) {
    // Check executable memory limit.
    if (size_executable_ + requested >
        static_cast<size_t>(capacity_executable_)) {
      LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }
    // Allocate executable memory either from code range or from the
    // OS.
    if (CodeRange::exists()) {
      mem = CodeRange::AllocateRawMemory(requested, allocated);
    } else {
      mem = OS::Allocate(requested, allocated, true);
    }
    // Update executable memory size.
    size_executable_ += static_cast<int>(*allocated);
  } else {
    mem = OS::Allocate(requested, allocated, false);
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;

#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem,
                                    size_t length,
                                    Executability executable) {
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
  if (CodeRange::contains(static_cast<Address>(mem))) {
    CodeRange::FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);

  ASSERT(size_ >= 0);
  ASSERT(size_executable_ >= 0);
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}
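
// Illustrative registration sketch (the callback body is hypothetical, and
// kObjectSpaceCodeSpace/kAllocationActionAll are assumed to be the bit-mask
// enum values from v8.h, matching the `&` tests in PerformAllocationCallback
// above):
//
//   static void TrackCodeSpace(ObjectSpace space, AllocationAction action,
//                              int size) {
//     // ... record the allocation or free event ...
//   }
//
//   MemoryAllocator::AddMemoryAllocationCallback(
//       &TrackCodeSpace, kObjectSpaceCodeSpace, kAllocationActionAll);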

void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}


Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee that the desired number of pages
  // can be allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size, owner->executable());
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  Counters::memory_allocated.Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}
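
// Debug-only scrubbing: every pointer-sized slot of a freshly committed or
// about-to-be-freed block is overwritten with kZapValue, so a stale pointer
// into such a block shows up as a distinctive zap pattern in a crash dump
// instead of a plausible heap address. (The exact constant is defined in the
// headers, not quoted here.)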


Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->InvalidateWatermark(true);
    p->SetIsLargeObjectPage(false);
    p->SetAllocationWatermark(p->ObjectAreaStart());
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}
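
// How opaque_header packs two values (explanatory note): pages are
// page-aligned, so the low kPageSizeBits of the next page's address are
// always zero and can carry the chunk id instead. With hypothetical values
// next_page_address == 0x2000 and chunk_id == 3, the header is
// 0x2000 | 3 == 0x2003; masking the low bits off recovers the next-page
// address, and masking them in recovers the chunk id.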


Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'prev'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::FreeAllPages(PagedSpace* space) {
  for (int i = 0, length = chunks_.length(); i < length; i++) {
    if (chunks_[i].owner() == space) {
      DeleteChunk(i);
    }
  }
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
    size_t size = c.size();
    FreeRawMemory(c.address(), size, c.executable());
    PerformAllocationCallback(space, kAllocationActionFree, size);
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
                                                 Page** first_page,
                                                 Page** last_page,
                                                 Page** last_page_in_use) {
  Page* first = NULL;
  Page* last = NULL;

  for (int i = 0, length = chunks_.length(); i < length; i++) {
    ChunkInfo& chunk = chunks_[i];

    if (chunk.owner() == space) {
      if (first == NULL) {
        Address low = RoundUp(chunk.address(), Page::kPageSize);
        first = Page::FromAddress(low);
      }
      last = RelinkPagesInChunk(i,
                                chunk.address(),
                                chunk.size(),
                                last,
                                last_page_in_use);
    }
  }

  if (first_page != NULL) {
    *first_page = first;
  }

  if (last_page != NULL) {
    *last_page = last;
  }
}


Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    p->InvalidateWatermark(true);
    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages =
        Min(MemoryAllocator::kPagesPerChunk,
            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially clear region marks in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  MemoryAllocator::FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::MarkAllPagesClean() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
  }
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers an address at the beginning of a page
  // to be in that page; MCSpaceOffsetForAddress considers it to be in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to the previous page.
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}
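
// Worked example (illustrative; assumes 8KB pages): for an address 0x100
// bytes into the page whose mc_page_index is 2, the encoded offset is
// 2 * Page::kPageSize + 0x100 == 0x4100. Using the page index instead of the
// page address yields a dense, per-space linear offset that is suitable for
// encoding forwarding addresses during mark-compact.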


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
  current_page->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages =
      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear region marks of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  if (!page_list_is_chunk_ordered_) {
    // We can't shrink the space if the page list is not chunk-ordered
    // (see the comment for class MemoryAllocator for the definition).
    return;
  }

  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap, and
        // the page regions covering intergenerational references should be
        // marked dirty.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size.  The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}
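
// Containment-test sketch (explanatory; matches the mask computed above):
// because the new-space chunk is power-of-two sized and aligned to its size,
// a containment check reduces to a mask compare. With hypothetical values
// start == 0x20000000 and size == 8MB, address_mask_ == ~(8MB - 1), and an
// address a lies in new space iff
// (reinterpret_cast<uintptr_t>(a) & address_mask_)
//     == reinterpret_cast<uintptr_t>(start_).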
1222
1223
1224void NewSpace::TearDown() {
1225#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1226 if (allocated_histogram_) {
1227 DeleteArray(allocated_histogram_);
1228 allocated_histogram_ = NULL;
1229 }
1230 if (promoted_histogram_) {
1231 DeleteArray(promoted_histogram_);
1232 promoted_histogram_ = NULL;
1233 }
1234#endif
1235
1236 start_ = NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001237 allocation_info_.top = NULL;
1238 allocation_info_.limit = NULL;
1239 mc_forwarding_info_.top = NULL;
1240 mc_forwarding_info_.limit = NULL;
1241
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001242 to_space_.TearDown();
1243 from_space_.TearDown();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001244}
1245
1246
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00001247#ifdef ENABLE_HEAP_PROTECTION
1248
1249void NewSpace::Protect() {
1250 MemoryAllocator::Protect(ToSpaceLow(), Capacity());
1251 MemoryAllocator::Protect(FromSpaceLow(), Capacity());
1252}
1253
1254
1255void NewSpace::Unprotect() {
1256 MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
1257 to_space_.executable());
1258 MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
1259 from_space_.executable());
1260}
1261
1262#endif
1263
1264
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001265void NewSpace::Flip() {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001266 SemiSpace tmp = from_space_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001267 from_space_ = to_space_;
1268 to_space_ = tmp;
1269}
1270
1271
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001272void NewSpace::Grow() {
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001273 ASSERT(Capacity() < MaximumCapacity());
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001274 if (to_space_.Grow()) {
1275 // Only grow from space if we managed to grow to space.
1276 if (!from_space_.Grow()) {
1277 // If we managed to grow to space but couldn't grow from space,
1278 // attempt to shrink to space.
1279 if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1280 // We are in an inconsistent state because we could not
1281 // commit/uncommit memory from new space.
1282 V8::FatalProcessOutOfMemory("Failed to grow new space.");
1283 }
1284 }
1285 }
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001286 allocation_info_.limit = to_space_.high();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001287 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001288}
1289
1290
1291void NewSpace::Shrink() {
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001292 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001293 int rounded_new_capacity =
1294 RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001295 if (rounded_new_capacity < Capacity() &&
1296 to_space_.ShrinkTo(rounded_new_capacity)) {
1297 // Only shrink from space if we managed to shrink to space.
1298 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1299 // If we managed to shrink to space but couldn't shrink from
1300 // space, attempt to grow to space again.
1301 if (!to_space_.GrowTo(from_space_.Capacity())) {
1302 // We are in an inconsistent state because we could not
1303 // commit/uncommit memory from new space.
1304 V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1305 }
1306 }
1307 }
1308 allocation_info_.limit = to_space_.high();
1309 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001310}
1311
1312
1313void NewSpace::ResetAllocationInfo() {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001314 allocation_info_.top = to_space_.low();
1315 allocation_info_.limit = to_space_.high();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001316 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1317}
1318
1319
1320void NewSpace::MCResetRelocationInfo() {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001321 mc_forwarding_info_.top = from_space_.low();
1322 mc_forwarding_info_.limit = from_space_.high();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001323 ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
1324}
1325
1326
1327void NewSpace::MCCommitRelocationInfo() {
1328 // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
1329 // valid allocation info for the to space.
1330 allocation_info_.top = mc_forwarding_info_.top;
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001331 allocation_info_.limit = to_space_.high();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001332 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1333}


#ifdef DEBUG
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


// -----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
    return false;
  }
  committed_ = true;
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
    return false;
  }
  committed_ = false;
  return true;
}


bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise. In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  initial_capacity_ = initial_capacity;
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;
  committed_ = false;

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;

  return Commit();
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  int maximum_extra = maximum_capacity_ - capacity_;
  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
                  maximum_extra);
  if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
    return false;
  }
  capacity_ += extra;
  return true;
}
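
// Worked growth example (sizes illustrative, not V8's defaults): with
// capacity_ == 512K, maximum_capacity_ == 2M and a 4K allocation alignment,
// extra == Min(RoundUp(512K, 4K), 1536K) == 512K, so the semispace doubles
// to 1M; a further Grow() doubles it to the 2M maximum, after which no
// headroom is left (maximum_extra == 0).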


bool SemiSpace::GrowTo(int new_capacity) {
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  size_t delta = new_capacity - capacity_;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  size_t delta = capacity_ - new_capacity;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.

SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// A static array of histogram info for each type.
static HistogramInfo heap_histograms[LAST_TYPE+1];
static JSObject::SpillInformation js_spill_information;

// heap_histograms is shared; always clear it before using it.
static void ClearHistograms() {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  js_spill_information.Clear();
}


static int code_kind_statistics[Code::NUMBER_OF_KINDS];


static void ClearCodeKindStatistics() {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  const char* table[Code::NUMBER_OF_KINDS] = { NULL };

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
      CASE(KEYED_CALL_IC);
      CASE(BINARY_OP_IC);
    }
  }

#undef CASE

  PrintF("\n Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(heap_histograms[type].name() != NULL);
  heap_histograms[type].increment_number(1);
  heap_histograms[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
  }

  return obj->Size();
}


static void ReportHistogram(bool print_spill) {
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (heap_histograms[i].number() > 0) {
      PrintF(" %-34s%10d (%10d bytes)\n",
             heap_histograms[i].name(),
             heap_histograms[i].number(),
             heap_histograms[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)    \
  string_number += heap_histograms[type].number(); \
  string_bytes += heap_histograms[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    js_spill_information.Print();
  }
}
#endif  // DEBUG


// Support for statistics gathering for --heap-stats and --log-gc.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}

// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens when compiled with DEBUG and the --heap-stats flag is
// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
// flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
    RecordAllocation(obj);
}


#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(HistogramInfo* info, const char* description) {
  LOG(HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
  string_number += info[type].number();         \
  string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(HeapSampleEndEvent("NewSpace", description));
}
#endif  // ENABLE_LOGGING_AND_PROFILING


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF(" capacity: %" V8_PTR_PREFIX "d"
           ", available: %" V8_PTR_PREFIX "d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-34s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) {
    DoReportStatistics(allocated_histogram_, "allocated");
    DoReportStatistics(promoted_histogram_, "promoted");
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block. If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > ByteArray::kHeaderSize) {
    set_map(Heap::raw_unchecked_byte_array_map());
    // Can't use ByteArray::cast because it fails during deserialization.
    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(Heap::raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(Heap::raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  // We would like to ASSERT(Size() == size_in_bytes) but this would fail
  // during deserialization because the byte array map is not done yet.
}
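
// Worked encoding example (assuming a 32-bit build: kPointerSize == 4,
// ByteArray::kHeaderSize == 8): set_size(32) tags the block as a byte array
// with length ByteArray::LengthFor(32) == 24, so HeapObject::Size() reports
// 32 bytes. set_size(4) uses the one-pointer filler map and set_size(8) the
// two-pointer filler map, since such blocks cannot hold a length word and a
// next pointer.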


Address FreeListNode::next() {
  ASSERT(IsFreeListNode(this));
  if (map() == Heap::raw_unchecked_byte_array_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    return Memory::Address_at(address() + kNextOffset);
  } else {
    return Memory::Address_at(address() + kPointerSize);
  }
}


void FreeListNode::set_next(Address next) {
  ASSERT(IsFreeListNode(this));
  if (map() == Heap::raw_unchecked_byte_array_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    Memory::Address_at(address() + kNextOffset) = next;
  } else {
    Memory::Address_at(address() + kPointerSize) = next;
  }
}


OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
  Reset();
}


void OldSpaceFreeList::Reset() {
  available_ = 0;
  for (int i = 0; i < kFreeListsLength; i++) {
    free_[i].head_node_ = NULL;
  }
  needs_rebuild_ = false;
  finger_ = kHead;
  free_[kHead].next_size_ = kEnd;
}


void OldSpaceFreeList::RebuildSizeList() {
  ASSERT(needs_rebuild_);
  int cur = kHead;
  for (int i = cur + 1; i < kFreeListsLength; i++) {
    if (free_[i].head_node_ != NULL) {
      free_[cur].next_size_ = i;
      cur = i;
    }
  }
  free_[cur].next_size_ = kEnd;
  needs_rebuild_ = false;
}


int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
  MemoryAllocator::ZapBlock(start, size_in_bytes);
#endif
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(size_in_bytes);

  // We don't use the freelists in compacting mode. This makes it more like a
  // GC that only has mark-sweep-compact and doesn't have a mark-sweep
  // collector.
  if (FLAG_always_compact) {
    return size_in_bytes;
  }

  // Early return to drop too-small blocks on the floor (one or two word
  // blocks cannot hold a map pointer, a size field, and a pointer to the
  // next block in the free list).
  if (size_in_bytes < kMinBlockSize) {
    return size_in_bytes;
  }

  // Insert other blocks at the head of an exact free list.
  int index = size_in_bytes >> kPointerSizeLog2;
  node->set_next(free_[index].head_node_);
  free_[index].head_node_ = node->address();
  available_ += size_in_bytes;
  needs_rebuild_ = true;
  return 0;
}
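
// Worked indexing example (assuming kPointerSize == 4, so kPointerSizeLog2
// == 2): freeing a 24-byte block files it under free_[6], the exact list
// for six-word blocks, so a later 24-byte request is a perfect fit found
// without searching. The return value is the number of bytes wasted, i.e.
// dropped on the floor rather than added to a free list.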


MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  if (needs_rebuild_) RebuildSizeList();
  int index = size_in_bytes >> kPointerSizeLog2;
  // Check for a perfect fit.
  if (free_[index].head_node_ != NULL) {
    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
    // If this was the last block of its size, remove the size.
    if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
    available_ -= size_in_bytes;
    *wasted_bytes = 0;
    ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
    return node;
  }
  // Search the size list for the best fit.
  int prev = finger_ < index ? finger_ : kHead;
  int cur = FindSize(index, &prev);
  ASSERT(index < cur);
  if (cur == kEnd) {
    // No large enough size in list.
    *wasted_bytes = 0;
    return Failure::RetryAfterGC(owner_);
  }
  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
  int rem = cur - index;
  int rem_bytes = rem << kPointerSizeLog2;
  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
                                                     size_in_bytes);
  // Distinguish the cases prev < rem < cur and rem <= prev < cur
  // to avoid many redundant tests and calls to Insert/RemoveSize.
  if (prev < rem) {
    // Simple case: insert rem between prev and cur.
    finger_ = prev;
    free_[prev].next_size_ = rem;
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
      free_[rem].next_size_ = free_[cur].next_size_;
    } else {
      free_[rem].next_size_ = cur;
    }
    // Add the remainder block.
    rem_node->set_size(rem_bytes);
    rem_node->set_next(free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
  } else {
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
      finger_ = prev;
      free_[prev].next_size_ = free_[cur].next_size_;
    }
    if (rem_bytes < kMinBlockSize) {
      // Too-small remainder is wasted.
      rem_node->set_size(rem_bytes);
      available_ -= size_in_bytes + rem_bytes;
      *wasted_bytes = rem_bytes;
      return cur_node;
    }
    // Add the remainder block and, if needed, insert its size.
    rem_node->set_size(rem_bytes);
    rem_node->set_next(free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
    if (rem_node->next() == NULL) InsertSize(rem);
  }
  available_ -= size_in_bytes;
  *wasted_bytes = 0;
  return cur_node;
}
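
// Worked best-fit example (assuming kPointerSize == 4): a request for 12
// bytes (index == 3) that only finds a 40-byte block (cur == 10) returns
// the first 12 bytes of that block and re-frees the trailing 28 bytes as a
// remainder block on free_[7] (rem == cur - index == 7), threading size 7
// into the size list if it was not already present.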


void OldSpaceFreeList::MarkNodes() {
  for (int i = 0; i < kFreeListsLength; i++) {
    Address cur_addr = free_[i].head_node_;
    while (cur_addr != NULL) {
      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
      cur_addr = cur_node->next();
      cur_node->SetMark();
    }
  }
}


#ifdef DEBUG
bool OldSpaceFreeList::Contains(FreeListNode* node) {
  for (int i = 0; i < kFreeListsLength; i++) {
    Address cur_addr = free_[i].head_node_;
    while (cur_addr != NULL) {
      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
      if (cur_node == node) return true;
      cur_addr = cur_node->next();
    }
  }
  return false;
}
#endif


FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
    : owner_(owner), object_size_(object_size) {
  Reset();
}


void FixedSizeFreeList::Reset() {
  available_ = 0;
  head_ = tail_ = NULL;
}


void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
  MemoryAllocator::ZapBlock(start, object_size_);
#endif
  // We only use the freelists with mark-sweep.
  ASSERT(!MarkCompactCollector::IsCompacting());
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(object_size_);
  node->set_next(NULL);
  if (head_ == NULL) {
    tail_ = head_ = node->address();
  } else {
    FreeListNode::FromAddress(tail_)->set_next(node->address());
    tail_ = node->address();
  }
  available_ += object_size_;
}
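
// Note that, unlike OldSpaceFreeList::Free() above, freed nodes are
// appended at tail_ rather than pushed at the head, so this list hands
// blocks back in FIFO order: Allocate() below always pops head_, the
// oldest freed block.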


MaybeObject* FixedSizeFreeList::Allocate() {
  if (head_ == NULL) {
    return Failure::RetryAfterGC(owner_);
  }

  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
  FreeListNode* node = FreeListNode::FromAddress(head_);
  head_ = node->next();
  available_ -= object_size_;
  return node;
}


void FixedSizeFreeList::MarkNodes() {
  Address cur_addr = head_;
  while (cur_addr != NULL && cur_addr != tail_) {
    FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
    cur_addr = cur_node->next();
    cur_node->SetMark();
  }
}


// -----------------------------------------------------------------------------
// OldSpace implementation

void OldSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the superclass.
  PagedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Reset relocation info. During a compacting collection, everything in
    // the space is considered 'available' and we will rediscover live data
    // and waste during the collection.
    MCResetRelocationInfo();
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer is considered allocated (everything above is
    // available) and we will rediscover available and wasted bytes during
    // the collection.
    accounting_stats_.AllocateBytes(free_list_.available());
    accounting_stats_.FillWastedBytes(Waste());
  }

  // Clear the free list before a full GC; it will be rebuilt afterward.
  free_list_.Reset();
}


void OldSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet built free lists or
  // wasted any space.
  ASSERT(Waste() == 0);
  ASSERT(AvailableFree() == 0);

  // Build the free list for the space.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* p = it.next();
    // Space below the relocation pointer is allocated.
    computed_size +=
        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
    if (it.has_next()) {
      // Free the space at the top of the page.
      int extra_size =
          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
      if (extra_size > 0) {
        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
                                           extra_size);
        // The bytes we have just "freed" to add to the free list were
        // already accounted as available.
        accounting_stats_.WasteBytes(wasted_bytes);
      }
    }
  }

  // Make sure the computed size - based on the used portion of the pages in
  // use - matches the size obtained while computing forwarding addresses.
  ASSERT(computed_size == Size());
}


bool NewSpace::ReserveSpace(int bytes) {
  // We can't reliably unpack a partial snapshot that needs more new space
  // than the minimum NewSpace size.
  ASSERT(bytes <= InitialCapacity());
  Address limit = allocation_info_.limit;
  Address top = allocation_info_.top;
  return limit - top >= bytes;
}


void PagedSpace::FreePages(Page* prev, Page* last) {
  if (last == AllocationTopPage()) {
    // Pages are already at the end of used pages.
    return;
  }

  Page* first = NULL;

  // Remove pages from the list.
  if (prev == NULL) {
    first = first_page_;
    first_page_ = last->next_page();
  } else {
    first = prev->next_page();
    MemoryAllocator::SetNextPage(prev, last->next_page());
  }

  // Attach the removed sequence after the last page.
  MemoryAllocator::SetNextPage(last_page_, first);
  last_page_ = last;
  MemoryAllocator::SetNextPage(last, NULL);

  // Clean them up.
  do {
    first->InvalidateWatermark(true);
    first->SetAllocationWatermark(first->ObjectAreaStart());
    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
    first = first->next_page();
  } while (first != NULL);

  // Order of pages in this space might no longer be consistent with
  // order of pages in chunks.
  page_list_is_chunk_ordered_ = false;
}


void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
  const bool add_to_freelist = true;

  // Mark used and unused pages to properly fill unused pages
  // after reordering.
  PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
  Page* last_in_use = AllocationTopPage();
  bool in_use = true;

  while (all_pages_iterator.has_next()) {
    Page* p = all_pages_iterator.next();
    p->SetWasInUseBeforeMC(in_use);
    if (p == last_in_use) {
      // We passed the page containing the allocation top. All subsequent
      // pages are not used.
      in_use = false;
    }
  }

  if (page_list_is_chunk_ordered_) return;

  Page* new_last_in_use = Page::FromAddress(NULL);
  MemoryAllocator::RelinkPageListInChunkOrder(this,
                                              &first_page_,
                                              &last_page_,
                                              &new_last_in_use);
  ASSERT(new_last_in_use->is_valid());

  if (new_last_in_use != last_in_use) {
    // The current allocation top points to a page which is now in the middle
    // of the page list. We should move the allocation top forward to the new
    // last used page so various object iterators will continue to work
    // properly.
    int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
                                         last_in_use->AllocationTop());

    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
    if (size_in_bytes > 0) {
      Address start = last_in_use->AllocationTop();
      if (deallocate_blocks) {
        accounting_stats_.AllocateBytes(size_in_bytes);
        DeallocateBlock(start, size_in_bytes, add_to_freelist);
      } else {
        Heap::CreateFillerObjectAt(start, size_in_bytes);
      }
    }

    // The new last-in-use page was in the middle of the list before
    // sorting, so it is full.
    SetTop(new_last_in_use->AllocationTop());

    ASSERT(AllocationTopPage() == new_last_in_use);
    ASSERT(AllocationTopPage()->WasInUseBeforeMC());
  }

  PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
  while (pages_in_use_iterator.has_next()) {
    Page* p = pages_in_use_iterator.next();
    if (!p->WasInUseBeforeMC()) {
      // An empty page is in the middle of a sequence of used pages.
      // Allocate it as a whole and deallocate it immediately.
      int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
                                           p->ObjectAreaStart());

      p->SetAllocationWatermark(p->ObjectAreaStart());
      Address start = p->ObjectAreaStart();
      if (deallocate_blocks) {
        accounting_stats_.AllocateBytes(size_in_bytes);
        DeallocateBlock(start, size_in_bytes, add_to_freelist);
      } else {
        Heap::CreateFillerObjectAt(start, size_in_bytes);
      }
    }
  }

  page_list_is_chunk_ordered_ = true;
}


void PagedSpace::PrepareForMarkCompact(bool will_compact) {
  if (will_compact) {
    RelinkPageListInChunkOrder(false);
  }
}


bool PagedSpace::ReserveSpace(int bytes) {
  Address limit = allocation_info_.limit;
  Address top = allocation_info_.top;
  if (limit - top >= bytes) return true;

  // There wasn't enough space in the current page. Let's put the rest
  // of the page on the free list and start a fresh page.
  PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));

  Page* reserved_page = TopPageOf(allocation_info_);
  int bytes_left_to_reserve = bytes;
  while (bytes_left_to_reserve > 0) {
    if (!reserved_page->next_page()->is_valid()) {
      if (Heap::OldGenerationAllocationLimitReached()) return false;
      Expand(reserved_page);
    }
    bytes_left_to_reserve -= Page::kPageSize;
    reserved_page = reserved_page->next_page();
    if (!reserved_page->is_valid()) return false;
  }
  ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&allocation_info_,
                    TopPageOf(allocation_info_)->next_page());
  return true;
}


// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
  return Heap::OldGenerationSpaceAvailable() >= bytes;
}


// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed (size_in_bytes should not be greater than a page's
  // object area size).
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation unless that
  // is currently forbidden.
  if (!Heap::linear_allocation()) {
    int wasted_bytes;
    Object* result;
    MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
    accounting_stats_.WasteBytes(wasted_bytes);
    if (maybe->ToObject(&result)) {
      accounting_stats_.AllocateBytes(size_in_bytes);

      HeapObject* obj = HeapObject::cast(result);
      Page* p = Page::FromAddress(obj->address());

      if (obj->address() >= p->AllocationWatermark()) {
        // There should be no hole between the allocation watermark
        // and allocated object address.
        // Memory above the allocation watermark was not swept and
        // might contain garbage pointers to new space.
        ASSERT(obj->address() == p->AllocationWatermark());
        p->SetAllocationWatermark(obj->address() + size_in_bytes);
      }

      return obj;
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}


void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
  current_page->SetAllocationWatermark(allocation_info_.top);
  int free_size =
      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
  if (free_size > 0) {
    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
    accounting_stats_.WasteBytes(wasted_bytes);
  }
}


void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
  current_page->SetAllocationWatermark(allocation_info_.top);
  int free_size =
      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
  // In the fixed space free list all the free list items have the right size.
  // We use up the rest of the page while preserving this invariant.
  while (free_size >= object_size_in_bytes_) {
    free_list_.Free(allocation_info_.top);
    allocation_info_.top += object_size_in_bytes_;
    free_size -= object_size_in_bytes_;
    accounting_stats_.WasteBytes(object_size_in_bytes_);
  }
}
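
// Worked carving example (object size illustrative): if 100 bytes remain at
// the top of the page and object_size_in_bytes_ == 32, the loop above frees
// three 32-byte nodes (each also recorded as waste in the accounting stats)
// and leaves the trailing 4 bytes alone, since they are smaller than one
// fixed-size node.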


// Add the block at the top of the page to the space's free list, set the
// allocation info to the next page (which is assumed to exist), and allocate
// linearly there.
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  Page* next_page = current_page->next_page();
  next_page->ClearGCFields();
  PutRestOfCurrentPageOnFreeList(current_page);
  SetAllocationInfo(&allocation_info_, next_page);
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}


void OldSpace::DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) {
  Free(start, size_in_bytes, add_to_freelist);
}


#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
};


// Must be small, since lookup is done by linear iteration.
const int kMaxComments = 64;
static CommentStatistic comments_statistics[kMaxComments+1];


void PagedSpace::ReportCodeStatistics() {
  ReportCodeKindStatistics();
  PrintF("Code comment statistics (\" [ comment-txt : size/ "
         "count (average)\"):\n");
  for (int i = 0; i <= kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}


void PagedSpace::ResetCodeStatistics() {
  ClearCodeKindStatistics();
  for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
  comments_statistics[kMaxComments].comment = "Unknown";
  comments_statistics[kMaxComments].size = 0;
  comments_statistics[kMaxComments].count = 0;
}


// Adds a comment to the 'comments_statistics' table. Performance is OK as
// long as 'kMaxComments' is small.
static void EnterComment(const char* comment, int delta) {
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to the result.
  for (int i = 0; i < kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update the entry for 'comment'.
  cs->size += delta;
  cs->count += 1;
}


// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, so we will
    // eventually exit this loop.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new comment.
      CollectCommentStatistics(it);
      // Skip code that was covered with the previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(comment_txt, flat_delta);
}


// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(&it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment("NoComment", delta);
    }
  }
}


void OldSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF(" capacity: %" V8_PTR_PREFIX "d"
         ", waste: %" V8_PTR_PREFIX "d"
         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
    CollectHistogramInfo(obj);
  ReportHistogram(true);
}
#endif

// -----------------------------------------------------------------------------
// FixedSpace implementation

void FixedSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the superclass.
  PagedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Reset relocation info.
    MCResetRelocationInfo();

    // During a compacting collection, everything in the space is considered
    // 'available' (set by the call to MCResetRelocationInfo) and we will
    // rediscover live and wasted bytes during the collection.
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer except wasted top-of-page blocks is considered
    // allocated and we will rediscover available bytes during the
    // collection.
    accounting_stats_.AllocateBytes(free_list_.available());
  }

  // Clear the free list before a full GC; it will be rebuilt afterward.
  free_list_.Reset();
}


void FixedSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet wasted any space.
  ASSERT(Waste() == 0);

  // Update allocation_top of each page in use and compute waste.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* page = it.next();
    Address page_top = page->AllocationTop();
    computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
    if (it.has_next()) {
      accounting_stats_.WasteBytes(
          static_cast<int>(page->ObjectAreaEnd() - page_top));
      page->SetAllocationWatermark(page_top);
    }
  }

  // Make sure the computed size - based on the used portion of the
  // pages in use - matches the size we adjust during allocation.
  ASSERT(computed_size == Size());
}


// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed.
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation unless
  // that is currently forbidden. The fixed space free list implicitly assumes
  // that all free blocks are of the fixed size.
  if (!Heap::linear_allocation()) {
    Object* result;
    MaybeObject* maybe = free_list_.Allocate();
    if (maybe->ToObject(&result)) {
      accounting_stats_.AllocateBytes(size_in_bytes);
      HeapObject* obj = HeapObject::cast(result);
      Page* p = Page::FromAddress(obj->address());

      if (obj->address() >= p->AllocationWatermark()) {
        // There should be no hole between the allocation watermark
        // and allocated object address.
        // Memory above the allocation watermark was not swept and
        // might contain garbage pointers to new space.
        ASSERT(obj->address() == p->AllocationWatermark());
        p->SetAllocationWatermark(obj->address() + size_in_bytes);
      }

      return obj;
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}


// Move to the next page (there is assumed to be one) and allocate there.
// The top-of-page block is always wasted, because it is too small to hold a
// map.
HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
                                           int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  Page* next_page = current_page->next_page();
  next_page->ClearGCFields();
  current_page->SetAllocationWatermark(allocation_info_.top);
  accounting_stats_.WasteBytes(page_extra_);
  SetAllocationInfo(&allocation_info_, next_page);
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}


void FixedSpace::DeallocateBlock(Address start,
                                 int size_in_bytes,
                                 bool add_to_freelist) {
  // Free-list elements in fixed space are assumed to have a fixed size.
  // We break the free block into chunks and add them to the free list
  // individually.
  int size = object_size_in_bytes();
  ASSERT(size_in_bytes % size == 0);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += size) {
    Free(a, add_to_freelist);
  }
}


#ifdef DEBUG
void FixedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF(" capacity: %" V8_PTR_PREFIX "d"
         ", waste: %" V8_PTR_PREFIX "d"
         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
    CollectHistogramInfo(obj);
  ReportHistogram(false);
}
#endif


// -----------------------------------------------------------------------------
// MapSpace implementation

void MapSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the superclass.
  FixedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Initialize the map index entries.
    int page_count = 0;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (it.has_next()) {
      ASSERT_MAP_PAGE_INDEX(page_count);

      Page* p = it.next();
      ASSERT(p->mc_page_index == page_count);

      page_addresses_[page_count++] = p->address();
    }
  }
}


#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
  // The object should be a map or a free-list node.
  ASSERT(object->IsMap() || object->IsByteArray());
}
#endif


// -----------------------------------------------------------------------------
// CellSpace (global property cell space) implementation

#ifdef DEBUG
void CellSpace::VerifyObject(HeapObject* object) {
  // The object should be a global object property cell or a free-list node.
  ASSERT(object->IsJSGlobalPropertyCell() ||
         object->map() == Heap::two_pointer_filler_map());
}
#endif


// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_chunk_;
  size_func_ = NULL;
}


LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func) {
  current_ = space->first_chunk_;
  size_func_ = size_func;
}


HeapObject* LargeObjectIterator::next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next();
  return object;
}
2694
2695
2696// -----------------------------------------------------------------------------
2697// LargeObjectChunk
2698
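// Each chunk holds exactly one large object; the chunk is sized so that the
// object starts at the object area of a page-aligned Page inside it (see
// ChunkSizeFor and the ASSERT in LargeObjectSpace::Verify).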
LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                        size_t* chunk_size,
                                        Executability executable) {
  size_t requested = ChunkSizeFor(size_in_bytes);
  void* mem = MemoryAllocator::AllocateRawMemory(requested,
                                                 chunk_size,
                                                 executable);
  if (mem == NULL) return NULL;
  LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
  if (*chunk_size < requested) {
    MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
    LOG(DeleteEvent("LargeObjectChunk", mem));
    return NULL;
  }
  ObjectSpace space =
      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
  MemoryAllocator::PerformAllocationCallback(space,
                                             kAllocationActionAllocate,
                                             *chunk_size);
  return reinterpret_cast<LargeObjectChunk*>(mem);
}


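// Worked example (assuming a 4KB OS allocation alignment and 8KB pages):
// the raw chunk is only 4KB-aligned, so rounding its start up to a page
// boundary can waste up to kPageSize - os_alignment = 4KB, and the object
// itself starts Page::kObjectStartOffset bytes into that page.  ChunkSizeFor
// pads the request by exactly those two amounts.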
int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
  int os_alignment = static_cast<int>(OS::AllocateAlignment());
  if (os_alignment < Page::kPageSize)
    size_in_bytes += (Page::kPageSize - os_alignment);
  return size_in_bytes + Page::kObjectStartOffset;
}

// -----------------------------------------------------------------------------
// LargeObjectSpace

LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
    : Space(id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_chunk_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0) {}


bool LargeObjectSpace::Setup() {
  first_chunk_ = NULL;
  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  return true;
}


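// Unconditionally frees every chunk in the space and resets the accounting
// fields; liveness is not consulted here.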
void LargeObjectSpace::TearDown() {
  while (first_chunk_ != NULL) {
    LargeObjectChunk* chunk = first_chunk_;
    first_chunk_ = first_chunk_->next();
    LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
    Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
    Executability executable =
        page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
    ObjectSpace space = kObjectSpaceLoSpace;
    if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
    size_t size = chunk->size();
    MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
    MemoryAllocator::PerformAllocationCallback(
        space, kAllocationActionFree, size);
  }

  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
}


#ifdef ENABLE_HEAP_PROTECTION

void LargeObjectSpace::Protect() {
  LargeObjectChunk* chunk = first_chunk_;
  while (chunk != NULL) {
    MemoryAllocator::Protect(chunk->address(), chunk->size());
    chunk = chunk->next();
  }
}


void LargeObjectSpace::Unprotect() {
  LargeObjectChunk* chunk = first_chunk_;
  while (chunk != NULL) {
    bool is_code = chunk->GetObject()->IsCode();
    MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
                               is_code ? EXECUTABLE : NOT_EXECUTABLE);
    chunk = chunk->next();
  }
}

#endif


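// All three public Allocate* entry points below funnel into this helper.
// A failure is reported as Failure::RetryAfterGC, signalling the caller to
// collect garbage and retry the allocation rather than treating it as fatal.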
MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                                   int object_size,
                                                   Executability executable) {
  ASSERT(0 < object_size && object_size <= requested_size);

  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(identity());
  }

  size_t chunk_size;
  LargeObjectChunk* chunk =
      LargeObjectChunk::New(requested_size, &chunk_size, executable);
  if (chunk == NULL) {
    return Failure::RetryAfterGC(identity());
  }

  size_ += static_cast<int>(chunk_size);
  objects_size_ += requested_size;
  page_count_++;
  chunk->set_next(first_chunk_);
  chunk->set_size(chunk_size);
  first_chunk_ = chunk;

  // Initialize page header.
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  Address object_address = page->ObjectAreaStart();
  // Clear the low order bit of the second word in the page to flag it as a
  // large object page.  If the chunk_size happened to be written there, its
  // low order bit should already be clear.
  ASSERT((chunk_size & 0x1) == 0);
  page->SetIsLargeObjectPage(true);
  page->SetIsPageExecutable(executable);
  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
  return HeapObject::FromAddress(object_address);
}


MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             EXECUTABLE);
}


MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


// GC support
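// Returns the object whose chunk covers the address |a|, or
// Failure::Exception() if no chunk in this space contains it.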
MaybeObject* LargeObjectSpace::FindObject(Address a) {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= a && a < chunk_address + chunk->size()) {
      return chunk->GetObject();
    }
  }
  return Failure::Exception();
}


LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
  // TODO(853): Change this implementation to only find executable
  // chunks and use some kind of hash-based approach to speed it up.
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
      return chunk;
    }
  }
  return NULL;
}


void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
  LargeObjectIterator it(this);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    if (object->IsFixedArray()) {
      Page* page = Page::FromAddress(object->address());
      uint32_t marks = page->GetRegionMarks();
      uint32_t newmarks = Page::kAllRegionsCleanMarks;

      if (marks != Page::kAllRegionsCleanMarks) {
        // For a large page a single dirty mark corresponds to several
        // regions (modulo 32).  So we treat a large page as a sequence of
        // normal pages of size Page::kPageSize having the same dirty marks,
        // and subsequently iterate dirty regions on each of these pages.
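        // E.g. (hypothetical sizes) a 40KB array on an 8KB-page system spans
        // five page-sized pieces; the page's single 32-bit mark word is
        // reused for each piece in turn, one bit per region of that piece.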
        Address start = object->address();
        Address end = page->ObjectAreaEnd();
        Address object_end = start + object->Size();

        // Iterate regions of the first normal page covering object.
        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
        newmarks |=
            Heap::IterateDirtyRegions(marks >> first_region_number,
                                      start,
                                      end,
                                      &Heap::IteratePointersInDirtyRegion,
                                      copy_object) << first_region_number;

        start = end;
        end = start + Page::kPageSize;
        while (end <= object_end) {
          // Iterate next 32 regions.
          newmarks |=
              Heap::IterateDirtyRegions(marks,
                                        start,
                                        end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object);
          start = end;
          end = start + Page::kPageSize;
        }

        if (start != object_end) {
          // Iterate the last piece of an object which is less than
          // Page::kPageSize.
          newmarks |=
              Heap::IterateDirtyRegions(marks,
                                        start,
                                        object_end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object);
        }

        page->SetRegionMarks(newmarks);
      }
    }
  }
}


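// Sweep the large object space: clear the mark bit of live objects, unlink
// dead chunks from the list, and return their memory to the OS.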
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      Page* page = Page::FromAddress(RoundUp(current->address(),
                                             Page::kPageSize));
      Executability executable =
          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      MarkCompactCollector::ReportDeleteIfNeeded(object);
      size_ -= static_cast<int>(chunk_size);
      objects_size_ -= object->Size();
      page_count_--;
      ObjectSpace space = kObjectSpaceLoSpace;
      if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
      // Report the freed chunk's own size to the callback, not the space's
      // running total size_.
      MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
                                                 chunk_size);
      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}


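// New space is excluded up front; its memory is not organized into tagged
// Pages, so the large-object page flag would be meaningless there (our
// reading of why the explicit check exists).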
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  if (Heap::new_space()->Contains(address)) {
    return false;
  }
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}


#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather than using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRegionDirty.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(Heap::Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (Heap::InNewSpace(element_object)) {
            Address array_addr = object->address();
            Address element_addr = array_addr + FixedArray::kHeaderSize +
                j * kPointerSize;

            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
          }
        }
      }
    }
  }
}


void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    obj->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal