// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())

intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;

// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


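// Advance the iterator to the next page of the space and return the first
// object on that page, or NULL when the end of the iteration is reached.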
HeapObject* HeapObjectIterator::FromNextPage() {
  if (cur_addr_ == end_addr_) return NULL;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  if (cur_addr_ == end_addr_) return NULL;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return FromCurrentPage();
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// -----------------------------------------------------------------------------
// PageIterator

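// Depending on the mode, iteration stops at the page containing the
// allocation top (PAGES_IN_USE), the page containing the mark-compact
// relocation top (PAGES_USED_BY_MC), or the cached last page of the space
// (ALL_PAGES).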
PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}


// -----------------------------------------------------------------------------
// CodeRange

List<CodeRange::FreeBlock> CodeRange::free_list_(0);
List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
int CodeRange::current_allocation_block_index_ = 0;
VirtualMemory* CodeRange::code_range_ = NULL;


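// Reserve a contiguous range of virtual addresses for generated code.  Keeping
// all code inside one reservation guarantees that the distance between any two
// code addresses fits in a signed 32-bit offset (see CompareFreeBlockAddress
// below).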
bool CodeRange::Setup(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(NewEvent("CodeRange", code_range_->address(), requested));
  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}


void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//
intptr_t MemoryAllocator::capacity_ = 0;
intptr_t MemoryAllocator::capacity_executable_ = 0;
intptr_t MemoryAllocator::size_ = 0;
intptr_t MemoryAllocator::size_executable_ = 0;

List<MemoryAllocator::MemoryAllocationCallbackRegistration>
    MemoryAllocator::memory_allocation_callbacks_;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


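// Ids of unused chunk slots are recycled through free_chunk_ids_, which is
// managed as a stack with top_ as its current depth.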
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than the required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ =
      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  size_executable_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  capacity_executable_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
    return NULL;
  }

  void* mem;
  if (executable == EXECUTABLE) {
    // Check executable memory limit.
    if (size_executable_ + requested >
        static_cast<size_t>(capacity_executable_)) {
      LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }
    // Allocate executable memory either from code range or from the
    // OS.
    if (CodeRange::exists()) {
      mem = CodeRange::AllocateRawMemory(requested, allocated);
    } else {
      mem = OS::Allocate(requested, allocated, true);
    }
    // Update executable memory size.
    size_executable_ += static_cast<int>(*allocated);
  } else {
    mem = OS::Allocate(requested, allocated, false);
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;

#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem,
                                    size_t length,
                                    Executability executable) {
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
  if (CodeRange::contains(static_cast<Address>(mem))) {
    CodeRange::FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);

  ASSERT(size_ >= 0);
  ASSERT(size_executable_ >= 0);
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}


Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee the desired number of pages can be
  // allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size, owner->executable());
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  Counters::memory_allocated.Decrement(static_cast<int>(size));
  return true;
}


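// Overwrite a block with kZapValue so that stale pointers into freed or
// freshly committed memory are easy to recognize (callers invoke this only in
// debug builds).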
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->InvalidateWatermark(true);
    p->SetIsLargeObjectPage(false);
    p->SetAllocationWatermark(p->ObjectAreaStart());
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


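// Frees pages a whole chunk at a time: every chunk after the one containing
// 'p' is deleted, and the chunk containing 'p' is deleted as well when 'p' is
// its first page.  Returns 'p' if its chunk survives, otherwise an invalid
// page.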
Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'prev'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::FreeAllPages(PagedSpace* space) {
  for (int i = 0, length = chunks_.length(); i < length; i++) {
    if (chunks_[i].owner() == space) {
      DeleteChunk(i);
    }
  }
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
    size_t size = c.size();
    FreeRawMemory(c.address(), size, c.executable());
    PerformAllocationCallback(space, kAllocationActionFree, size);
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
                                                 Page** first_page,
                                                 Page** last_page,
                                                 Page** last_page_in_use) {
  Page* first = NULL;
  Page* last = NULL;

  for (int i = 0, length = chunks_.length(); i < length; i++) {
    ChunkInfo& chunk = chunks_[i];

    if (chunk.owner() == space) {
      if (first == NULL) {
        Address low = RoundUp(chunk.address(), Page::kPageSize);
        first = Page::FromAddress(low);
      }
      last = RelinkPagesInChunk(i,
                                chunk.address(),
                                chunk.size(),
                                last,
                                last_page_in_use);
    }
  }

  if (first_page != NULL) {
    *first_page = first;
  }

  if (last_page != NULL) {
    *last_page = last;
  }
}


Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    p->InvalidateWatermark(true);
    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages =
        Min(MemoryAllocator::kPagesPerChunk,
            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially clear region marks in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  MemoryAllocator::FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::MarkAllPagesClean() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
  }
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


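// Map an address to a linear offset within the space by treating the pages as
// if they were contiguous: mc_page_index * page size plus the offset of the
// address within its page.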
int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page in the page, MCSpaceOffsetForAddress considers it is in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to previous page
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
  current_page->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


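// Grow the space by allocating up to kPagesPerChunk additional pages and
// linking them in after last_page, without exceeding max_capacity_.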
bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages =
      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear region marks of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  if (!page_list_is_chunk_ordered_) {
    // We can't shrink the space if the page list is not chunk-ordered
    // (see comment for class MemoryAllocator for definition).
    return;
  }

  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap, and
        // the page regions covering intergenerational references should
        // be marked dirty.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


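// Swap the roles of the two semispaces.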
void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
    return false;
  }
  committed_ = true;
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
    return false;
  }
  committed_ = false;
  return true;
}


// -----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  initial_capacity_ = initial_capacity;
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;
  committed_ = false;

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;

  return Commit();
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  int maximum_extra = maximum_capacity_ - capacity_;
  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
                  maximum_extra);
  if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
    return false;
  }
  capacity_ += extra;
  return true;
}


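// Commit enough additional memory to bring the semispace to new_capacity;
// the delta must be aligned to the OS allocation granularity.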
1442bool SemiSpace::GrowTo(int new_capacity) {
1443 ASSERT(new_capacity <= maximum_capacity_);
1444 ASSERT(new_capacity > capacity_);
1445 size_t delta = new_capacity - capacity_;
1446 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1447 if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
1448 return false;
1449 }
1450 capacity_ = new_capacity;
1451 return true;
1452}
1453
1454
1455bool SemiSpace::ShrinkTo(int new_capacity) {
1456 ASSERT(new_capacity >= initial_capacity_);
1457 ASSERT(new_capacity < capacity_);
1458 size_t delta = capacity_ - new_capacity;
1459 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1460 if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
1461 return false;
1462 }
1463 capacity_ = new_capacity;
1464 return true;
1465}
1466
1467
1468#ifdef DEBUG
1469void SemiSpace::Print() { }
1470
1471
1472void SemiSpace::Verify() { }
1473#endif
1474
1475
1476// -----------------------------------------------------------------------------
1477// SemiSpaceIterator implementation.
1478SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1479 Initialize(space, space->bottom(), space->top(), NULL);
1480}
1481
1482
1483SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1484 HeapObjectCallback size_func) {
1485 Initialize(space, space->bottom(), space->top(), size_func);
1486}
1487
1488
1489SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1490 Initialize(space, start, space->top(), NULL);
1491}
1492
1493
1494void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
1495 Address end,
1496 HeapObjectCallback size_func) {
1497 ASSERT(space->ToSpaceContains(start));
1498 ASSERT(space->ToSpaceLow() <= end
1499 && end <= space->ToSpaceHigh());
1500 space_ = &space->to_space_;
1501 current_ = start;
1502 limit_ = end;
1503 size_func_ = size_func;
1504}
1505
1506
1507#ifdef DEBUG
1508// A static array of histogram info for each type.
1509static HistogramInfo heap_histograms[LAST_TYPE+1];
1510static JSObject::SpillInformation js_spill_information;
1511
1512// heap_histograms is shared, always clear it before using it.
1513static void ClearHistograms() {
1514 // We reset the name each time, though it hasn't changed.
1515#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
1516 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1517#undef DEF_TYPE_NAME
1518
1519#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
1520 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1521#undef CLEAR_HISTOGRAM
1522
1523 js_spill_information.Clear();
1524}
1525
1526
1527static int code_kind_statistics[Code::NUMBER_OF_KINDS];
1528
1529
1530static void ClearCodeKindStatistics() {
1531 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1532 code_kind_statistics[i] = 0;
1533 }
1534}
1535
1536
1537static void ReportCodeKindStatistics() {
Steve Block6ded16b2010-05-10 14:33:55 +01001538 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
Steve Blocka7e24c12009-10-30 11:49:00 +00001539
1540#define CASE(name) \
1541 case Code::name: table[Code::name] = #name; \
1542 break
1543
1544 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1545 switch (static_cast<Code::Kind>(i)) {
1546 CASE(FUNCTION);
1547 CASE(STUB);
1548 CASE(BUILTIN);
1549 CASE(LOAD_IC);
1550 CASE(KEYED_LOAD_IC);
1551 CASE(STORE_IC);
1552 CASE(KEYED_STORE_IC);
1553 CASE(CALL_IC);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001554 CASE(KEYED_CALL_IC);
Steve Block6ded16b2010-05-10 14:33:55 +01001555 CASE(BINARY_OP_IC);
Steve Blocka7e24c12009-10-30 11:49:00 +00001556 }
1557 }
1558
1559#undef CASE
1560
1561 PrintF("\n Code kind histograms: \n");
1562 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1563 if (code_kind_statistics[i] > 0) {
1564 PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
1565 }
1566 }
1567 PrintF("\n");
1568}
1569
1570
1571static int CollectHistogramInfo(HeapObject* obj) {
1572 InstanceType type = obj->map()->instance_type();
1573 ASSERT(0 <= type && type <= LAST_TYPE);
1574 ASSERT(heap_histograms[type].name() != NULL);
1575 heap_histograms[type].increment_number(1);
1576 heap_histograms[type].increment_bytes(obj->Size());
1577
1578 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1579 JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
1580 }
1581
1582 return obj->Size();
1583}
1584
1585
1586static void ReportHistogram(bool print_spill) {
1587 PrintF("\n Object Histogram:\n");
1588 for (int i = 0; i <= LAST_TYPE; i++) {
1589 if (heap_histograms[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001590 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001591 heap_histograms[i].name(),
1592 heap_histograms[i].number(),
1593 heap_histograms[i].bytes());
1594 }
1595 }
1596 PrintF("\n");
1597
1598 // Summarize string types.
1599 int string_number = 0;
1600 int string_bytes = 0;
1601#define INCREMENT(type, size, name, camel_name) \
1602 string_number += heap_histograms[type].number(); \
1603 string_bytes += heap_histograms[type].bytes();
1604 STRING_TYPE_LIST(INCREMENT)
1605#undef INCREMENT
1606 if (string_number > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001607 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
Steve Blocka7e24c12009-10-30 11:49:00 +00001608 string_bytes);
1609 }
1610
1611 if (FLAG_collect_heap_spill_statistics && print_spill) {
1612 js_spill_information.Print();
1613 }
1614}
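
// Illustration only: the INCREMENT accumulation above relies on the "X macro"
// pattern -- STRING_TYPE_LIST(V) expands the macro V once per string type. A
// self-contained miniature of the same pattern (all names are hypothetical):
#define EXAMPLE_TYPE_LIST(V) \
  V(EXAMPLE_TYPE_A, 8)       \
  V(EXAMPLE_TYPE_B, 16)

static int ExampleTotalTypeSize() {
  int total = 0;
#define EXAMPLE_ADD(name, size) total += size;
  EXAMPLE_TYPE_LIST(EXAMPLE_ADD)
#undef EXAMPLE_ADD
  return total;  // 24 for the two example entries above.
}
#undef EXAMPLE_TYPE_LIST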
1615#endif // DEBUG
1616
1617
1618// Support for statistics gathering for --heap-stats and --log-gc.
1619#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1620void NewSpace::ClearHistograms() {
1621 for (int i = 0; i <= LAST_TYPE; i++) {
1622 allocated_histogram_[i].clear();
1623 promoted_histogram_[i].clear();
1624 }
1625}
1626
1627// Because the copying collector does not touch garbage objects, we iterate
1628// the new space before a collection to get a histogram of allocated objects.
1629// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1630// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1631// flag is set.
1632void NewSpace::CollectStatistics() {
1633 ClearHistograms();
1634 SemiSpaceIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00001635 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1636 RecordAllocation(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001637}
1638
1639
1640#ifdef ENABLE_LOGGING_AND_PROFILING
1641static void DoReportStatistics(HistogramInfo* info, const char* description) {
1642 LOG(HeapSampleBeginEvent("NewSpace", description));
1643 // Lump all the string types together.
1644 int string_number = 0;
1645 int string_bytes = 0;
1646#define INCREMENT(type, size, name, camel_name) \
1647 string_number += info[type].number(); \
1648 string_bytes += info[type].bytes();
1649 STRING_TYPE_LIST(INCREMENT)
1650#undef INCREMENT
1651 if (string_number > 0) {
1652 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1653 }
1654
1655 // Then do the other types.
1656 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1657 if (info[i].number() > 0) {
1658 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
1659 info[i].bytes()));
1660 }
1661 }
1662 LOG(HeapSampleEndEvent("NewSpace", description));
1663}
1664#endif // ENABLE_LOGGING_AND_PROFILING
1665
1666
1667void NewSpace::ReportStatistics() {
1668#ifdef DEBUG
1669 if (FLAG_heap_stats) {
1670 float pct = static_cast<float>(Available()) / Capacity();
Ben Murdochf87a2032010-10-22 12:50:53 +01001671 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1672 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001673 Capacity(), Available(), static_cast<int>(pct*100));
1674 PrintF("\n Object Histogram:\n");
1675 for (int i = 0; i <= LAST_TYPE; i++) {
1676 if (allocated_histogram_[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001677 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001678 allocated_histogram_[i].name(),
1679 allocated_histogram_[i].number(),
1680 allocated_histogram_[i].bytes());
1681 }
1682 }
1683 PrintF("\n");
1684 }
1685#endif // DEBUG
1686
1687#ifdef ENABLE_LOGGING_AND_PROFILING
1688 if (FLAG_log_gc) {
1689 DoReportStatistics(allocated_histogram_, "allocated");
1690 DoReportStatistics(promoted_histogram_, "promoted");
1691 }
1692#endif // ENABLE_LOGGING_AND_PROFILING
1693}
1694
1695
1696void NewSpace::RecordAllocation(HeapObject* obj) {
1697 InstanceType type = obj->map()->instance_type();
1698 ASSERT(0 <= type && type <= LAST_TYPE);
1699 allocated_histogram_[type].increment_number(1);
1700 allocated_histogram_[type].increment_bytes(obj->Size());
1701}
1702
1703
1704void NewSpace::RecordPromotion(HeapObject* obj) {
1705 InstanceType type = obj->map()->instance_type();
1706 ASSERT(0 <= type && type <= LAST_TYPE);
1707 promoted_histogram_[type].increment_number(1);
1708 promoted_histogram_[type].increment_bytes(obj->Size());
1709}
1710#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1711
1712
1713// -----------------------------------------------------------------------------
1714// Free lists for old object spaces implementation
1715
1716void FreeListNode::set_size(int size_in_bytes) {
1717 ASSERT(size_in_bytes > 0);
1718 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1719
1720 // We write a map and possibly size information to the block. If the block
1721 // is big enough to be a ByteArray with at least one extra word (the next
1722 // pointer), we set its map to be the byte array map and its size to an
1723 // appropriate array length for the desired size from HeapObject::Size().
1724 // If the block is too small (e.g., one or two words) to hold both a size
1725 // field and a next pointer, we give it a filler map that gives it the
1726 // correct size.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001727 if (size_in_bytes > ByteArray::kHeaderSize) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001728 set_map(Heap::raw_unchecked_byte_array_map());
Steve Blockd0582a62009-12-15 09:54:21 +00001729 // Can't use ByteArray::cast because it fails during deserialization.
1730 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1731 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00001732 } else if (size_in_bytes == kPointerSize) {
1733 set_map(Heap::raw_unchecked_one_pointer_filler_map());
1734 } else if (size_in_bytes == 2 * kPointerSize) {
1735 set_map(Heap::raw_unchecked_two_pointer_filler_map());
1736 } else {
1737 UNREACHABLE();
1738 }
Steve Blockd0582a62009-12-15 09:54:21 +00001739 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1740 // deserialization because the byte array map is not done yet.
Steve Blocka7e24c12009-10-30 11:49:00 +00001741}
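
// A hedged sketch (illustration only, not used by the code above): the three
// encodings set_size() chooses between, keyed purely off the block size. The
// enum and helper names are hypothetical.
enum ExampleFreeBlockEncoding {
  EXAMPLE_ONE_POINTER_FILLER,  // exactly one word
  EXAMPLE_TWO_POINTER_FILLER,  // exactly two words
  EXAMPLE_BYTE_ARRAY           // big enough for a map, a length and a next field
};

static ExampleFreeBlockEncoding ExampleEncodingFor(int size_in_bytes) {
  if (size_in_bytes > ByteArray::kHeaderSize) return EXAMPLE_BYTE_ARRAY;
  if (size_in_bytes == kPointerSize) return EXAMPLE_ONE_POINTER_FILLER;
  ASSERT(size_in_bytes == 2 * kPointerSize);
  return EXAMPLE_TWO_POINTER_FILLER;
}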
1742
1743
1744Address FreeListNode::next() {
Steve Block3ce2e202009-11-05 08:53:23 +00001745 ASSERT(IsFreeListNode(this));
Steve Blocka7e24c12009-10-30 11:49:00 +00001746 if (map() == Heap::raw_unchecked_byte_array_map()) {
1747 ASSERT(Size() >= kNextOffset + kPointerSize);
1748 return Memory::Address_at(address() + kNextOffset);
1749 } else {
1750 return Memory::Address_at(address() + kPointerSize);
1751 }
1752}
1753
1754
1755void FreeListNode::set_next(Address next) {
Steve Block3ce2e202009-11-05 08:53:23 +00001756 ASSERT(IsFreeListNode(this));
Steve Blocka7e24c12009-10-30 11:49:00 +00001757 if (map() == Heap::raw_unchecked_byte_array_map()) {
1758 ASSERT(Size() >= kNextOffset + kPointerSize);
1759 Memory::Address_at(address() + kNextOffset) = next;
1760 } else {
1761 Memory::Address_at(address() + kPointerSize) = next;
1762 }
1763}
1764
1765
1766OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
1767 Reset();
1768}
1769
1770
1771void OldSpaceFreeList::Reset() {
1772 available_ = 0;
1773 for (int i = 0; i < kFreeListsLength; i++) {
1774 free_[i].head_node_ = NULL;
1775 }
1776 needs_rebuild_ = false;
1777 finger_ = kHead;
1778 free_[kHead].next_size_ = kEnd;
1779}
1780
1781
1782void OldSpaceFreeList::RebuildSizeList() {
1783 ASSERT(needs_rebuild_);
1784 int cur = kHead;
1785 for (int i = cur + 1; i < kFreeListsLength; i++) {
1786 if (free_[i].head_node_ != NULL) {
1787 free_[cur].next_size_ = i;
1788 cur = i;
1789 }
1790 }
1791 free_[cur].next_size_ = kEnd;
1792 needs_rebuild_ = false;
1793}
1794
1795
1796int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1797#ifdef DEBUG
Leon Clarke4515c472010-02-03 11:58:03 +00001798 MemoryAllocator::ZapBlock(start, size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001799#endif
1800 FreeListNode* node = FreeListNode::FromAddress(start);
1801 node->set_size(size_in_bytes);
1802
1803 // We don't use the freelists in compacting mode. This makes it more like a
1804 // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1805 // collector.
1806 if (FLAG_always_compact) {
1807 return size_in_bytes;
1808 }
1809
1810 // Early return to drop too-small blocks on the floor (one or two word
1811 // blocks cannot hold a map pointer, a size field, and a pointer to the
1812 // next block in the free list).
1813 if (size_in_bytes < kMinBlockSize) {
1814 return size_in_bytes;
1815 }
1816
1817 // Insert other blocks at the head of an exact free list.
1818 int index = size_in_bytes >> kPointerSizeLog2;
1819 node->set_next(free_[index].head_node_);
1820 free_[index].head_node_ = node->address();
1821 available_ += size_in_bytes;
1822 needs_rebuild_ = true;
1823 return 0;
1824}
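
// A minimal sketch (illustration only) of the size-class mapping used by
// Free() above: every pointer-aligned size gets its own exact list, so the
// list index is just the size divided by the pointer size, and the reverse
// mapping recovers the block size a list index stands for.
static int ExampleFreeListIndexFor(int size_in_bytes) {
  ASSERT(IsAligned(size_in_bytes, kPointerSize));
  return size_in_bytes >> kPointerSizeLog2;
}

static int ExampleBlockSizeForIndex(int index) {
  return index << kPointerSizeLog2;
}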
1825
1826
John Reck59135872010-11-02 12:39:01 -07001827MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001828 ASSERT(0 < size_in_bytes);
1829 ASSERT(size_in_bytes <= kMaxBlockSize);
1830 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1831
1832 if (needs_rebuild_) RebuildSizeList();
1833 int index = size_in_bytes >> kPointerSizeLog2;
1834 // Check for a perfect fit.
1835 if (free_[index].head_node_ != NULL) {
1836 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1837 // If this was the last block of its size, remove the size.
1838 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
1839 available_ -= size_in_bytes;
1840 *wasted_bytes = 0;
1841 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1842 return node;
1843 }
1844 // Search the size list for the best fit.
1845 int prev = finger_ < index ? finger_ : kHead;
1846 int cur = FindSize(index, &prev);
1847 ASSERT(index < cur);
1848 if (cur == kEnd) {
1849 // No large enough size in list.
1850 *wasted_bytes = 0;
Ben Murdochf87a2032010-10-22 12:50:53 +01001851 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001852 }
1853 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1854 int rem = cur - index;
1855 int rem_bytes = rem << kPointerSizeLog2;
1856 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
1857 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
1858 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1859 size_in_bytes);
1860 // Distinguish the cases prev < rem < cur and rem <= prev < cur
1861 // to avoid many redundant tests and calls to Insert/RemoveSize.
1862 if (prev < rem) {
1863 // Simple case: insert rem between prev and cur.
1864 finger_ = prev;
1865 free_[prev].next_size_ = rem;
1866 // If this was the last block of size cur, remove the size.
1867 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1868 free_[rem].next_size_ = free_[cur].next_size_;
1869 } else {
1870 free_[rem].next_size_ = cur;
1871 }
1872 // Add the remainder block.
1873 rem_node->set_size(rem_bytes);
1874 rem_node->set_next(free_[rem].head_node_);
1875 free_[rem].head_node_ = rem_node->address();
1876 } else {
1877 // If this was the last block of size cur, remove the size.
1878 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1879 finger_ = prev;
1880 free_[prev].next_size_ = free_[cur].next_size_;
1881 }
1882 if (rem_bytes < kMinBlockSize) {
1883 // Too-small remainder is wasted.
1884 rem_node->set_size(rem_bytes);
1885 available_ -= size_in_bytes + rem_bytes;
1886 *wasted_bytes = rem_bytes;
1887 return cur_node;
1888 }
1889 // Add the remainder block and, if needed, insert its size.
1890 rem_node->set_size(rem_bytes);
1891 rem_node->set_next(free_[rem].head_node_);
1892 free_[rem].head_node_ = rem_node->address();
1893 if (rem_node->next() == NULL) InsertSize(rem);
1894 }
1895 available_ -= size_in_bytes;
1896 *wasted_bytes = 0;
1897 return cur_node;
1898}
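
// Illustration only: the remainder arithmetic Allocate() above performs after
// the perfect-fit check, when a block from size class 'cur' serves a request
// of size class 'index'. The 'min_block_size' parameter plays the role of
// kMinBlockSize; the struct and helper names are hypothetical.
struct ExampleSplitResult {
  int remainder_bytes;    // size of the leftover block after the split
  bool remainder_usable;  // false => the leftover is accounted as waste
};

static ExampleSplitResult ExampleSplit(int cur, int index, int min_block_size) {
  ASSERT(index < cur);
  ExampleSplitResult result;
  result.remainder_bytes = (cur - index) << kPointerSizeLog2;
  result.remainder_usable = result.remainder_bytes >= min_block_size;
  return result;
}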
1899
1900
1901#ifdef DEBUG
1902bool OldSpaceFreeList::Contains(FreeListNode* node) {
1903 for (int i = 0; i < kFreeListsLength; i++) {
1904 Address cur_addr = free_[i].head_node_;
1905 while (cur_addr != NULL) {
1906 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1907 if (cur_node == node) return true;
1908 cur_addr = cur_node->next();
1909 }
1910 }
1911 return false;
1912}
1913#endif
1914
1915
1916FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
1917 : owner_(owner), object_size_(object_size) {
1918 Reset();
1919}
1920
1921
1922void FixedSizeFreeList::Reset() {
1923 available_ = 0;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001924 head_ = tail_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001925}
1926
1927
1928void FixedSizeFreeList::Free(Address start) {
1929#ifdef DEBUG
Leon Clarke4515c472010-02-03 11:58:03 +00001930 MemoryAllocator::ZapBlock(start, object_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001931#endif
Leon Clarkee46be812010-01-19 14:06:41 +00001932 // We only use the freelists with mark-sweep.
1933 ASSERT(!MarkCompactCollector::IsCompacting());
Steve Blocka7e24c12009-10-30 11:49:00 +00001934 FreeListNode* node = FreeListNode::FromAddress(start);
1935 node->set_size(object_size_);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001936 node->set_next(NULL);
1937 if (head_ == NULL) {
1938 tail_ = head_ = node->address();
1939 } else {
1940 FreeListNode::FromAddress(tail_)->set_next(node->address());
1941 tail_ = node->address();
1942 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001943 available_ += object_size_;
1944}
1945
1946
John Reck59135872010-11-02 12:39:01 -07001947MaybeObject* FixedSizeFreeList::Allocate() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001948 if (head_ == NULL) {
Ben Murdochf87a2032010-10-22 12:50:53 +01001949 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001950 }
1951
1952 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1953 FreeListNode* node = FreeListNode::FromAddress(head_);
1954 head_ = node->next();
1955 available_ -= object_size_;
1956 return node;
1957}
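
// Illustration only: the list discipline used by FixedSizeFreeList above is a
// plain FIFO queue -- Free() appends at the tail, Allocate() pops from the
// head. A minimal sketch with an ordinary linked node type (hypothetical, not
// the map-encoded FreeListNode representation used by the real code):
struct ExampleFifoNode {
  ExampleFifoNode* next;
};

struct ExampleFifo {
  ExampleFifoNode* head;
  ExampleFifoNode* tail;

  void Append(ExampleFifoNode* node) {
    node->next = NULL;
    if (head == NULL) {
      head = tail = node;
    } else {
      tail->next = node;
      tail = node;
    }
  }

  ExampleFifoNode* Pop() {
    if (head == NULL) return NULL;
    ExampleFifoNode* node = head;
    head = node->next;
    return node;  // Like the real code, a stale tail is fixed up by Append().
  }
};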
1958
1959
1960// -----------------------------------------------------------------------------
1961// OldSpace implementation
1962
1963void OldSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01001964 // Call prepare of the super class.
1965 PagedSpace::PrepareForMarkCompact(will_compact);
1966
Steve Blocka7e24c12009-10-30 11:49:00 +00001967 if (will_compact) {
1968 // Reset relocation info. During a compacting collection, everything in
1969 // the space is considered 'available' and we will rediscover live data
1970 // and waste during the collection.
1971 MCResetRelocationInfo();
1972 ASSERT(Available() == Capacity());
1973 } else {
1974 // During a non-compacting collection, everything below the linear
1975 // allocation pointer is considered allocated (everything above is
1976 // available) and we will rediscover available and wasted bytes during
1977 // the collection.
1978 accounting_stats_.AllocateBytes(free_list_.available());
1979 accounting_stats_.FillWastedBytes(Waste());
1980 }
1981
1982 // Clear the free list before a full GC---it will be rebuilt afterward.
1983 free_list_.Reset();
1984}
1985
1986
1987void OldSpace::MCCommitRelocationInfo() {
1988 // Update fast allocation info.
1989 allocation_info_.top = mc_forwarding_info_.top;
1990 allocation_info_.limit = mc_forwarding_info_.limit;
1991 ASSERT(allocation_info_.VerifyPagedAllocation());
1992
1993 // The space is compacted and we haven't yet built free lists or
1994 // wasted any space.
1995 ASSERT(Waste() == 0);
1996 ASSERT(AvailableFree() == 0);
1997
1998 // Build the free list for the space.
1999 int computed_size = 0;
2000 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2001 while (it.has_next()) {
2002 Page* p = it.next();
2003 // Space below the relocation pointer is allocated.
Steve Blockd0582a62009-12-15 09:54:21 +00002004 computed_size +=
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002005 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00002006 if (it.has_next()) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002007 // Free the space at the top of the page.
Steve Blockd0582a62009-12-15 09:54:21 +00002008 int extra_size =
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002009 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
Steve Blocka7e24c12009-10-30 11:49:00 +00002010 if (extra_size > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002011 int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
2012 extra_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002013 // The bytes we have just "freed" to add to the free list were
2014 // already accounted as available.
2015 accounting_stats_.WasteBytes(wasted_bytes);
2016 }
2017 }
2018 }
2019
2020 // Make sure the computed size - based on the used portion of the pages in
2021 // use - matches the size obtained while computing forwarding addresses.
2022 ASSERT(computed_size == Size());
2023}
2024
2025
Leon Clarkee46be812010-01-19 14:06:41 +00002026bool NewSpace::ReserveSpace(int bytes) {
2027 // We cannot reliably unpack a partial snapshot that needs more space in
2028 // the new space than the minimum NewSpace size.
2029 ASSERT(bytes <= InitialCapacity());
2030 Address limit = allocation_info_.limit;
2031 Address top = allocation_info_.top;
2032 return limit - top >= bytes;
2033}
2034
2035
Steve Block6ded16b2010-05-10 14:33:55 +01002036void PagedSpace::FreePages(Page* prev, Page* last) {
2037 if (last == AllocationTopPage()) {
2038 // Pages are already at the end of used pages.
2039 return;
2040 }
2041
2042 Page* first = NULL;
2043
2044 // Remove pages from the list.
2045 if (prev == NULL) {
2046 first = first_page_;
2047 first_page_ = last->next_page();
2048 } else {
2049 first = prev->next_page();
2050 MemoryAllocator::SetNextPage(prev, last->next_page());
2051 }
2052
2053 // Attach it after the last page.
2054 MemoryAllocator::SetNextPage(last_page_, first);
2055 last_page_ = last;
2056 MemoryAllocator::SetNextPage(last, NULL);
2057
2058 // Clean them up.
2059 do {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002060 first->InvalidateWatermark(true);
2061 first->SetAllocationWatermark(first->ObjectAreaStart());
2062 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
2063 first->SetRegionMarks(Page::kAllRegionsCleanMarks);
Steve Block6ded16b2010-05-10 14:33:55 +01002064 first = first->next_page();
2065 } while (first != NULL);
2066
2067 // The order of pages in this space might no longer be consistent with
2068 // the order of pages in chunks.
2069 page_list_is_chunk_ordered_ = false;
2070}
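
// Illustration only: the pointer surgery FreePages() above performs, shown on
// a generic singly linked list. The sublist that starts after 'prev' (or at
// the head when 'prev' is NULL) and ends at 'last' is unlinked and reattached
// after the current tail. The node type and helper are hypothetical.
struct ExampleListNode {
  ExampleListNode* next;
};

static void ExampleMoveSublistToEnd(ExampleListNode** head,
                                    ExampleListNode** tail,
                                    ExampleListNode* prev,
                                    ExampleListNode* last) {
  if (last == *tail) return;  // Already at the end; nothing to move.
  ExampleListNode* first = (prev == NULL) ? *head : prev->next;
  // Detach the sublist from where it currently sits.
  if (prev == NULL) {
    *head = last->next;
  } else {
    prev->next = last->next;
  }
  // Reattach it after the old tail and terminate the list there.
  (*tail)->next = first;
  *tail = last;
  last->next = NULL;
}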
2071
2072
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002073void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
2074 const bool add_to_freelist = true;
2075
2076 // Mark used and unused pages to properly fill unused pages
2077 // after reordering.
2078 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
2079 Page* last_in_use = AllocationTopPage();
2080 bool in_use = true;
2081
2082 while (all_pages_iterator.has_next()) {
2083 Page* p = all_pages_iterator.next();
2084 p->SetWasInUseBeforeMC(in_use);
2085 if (p == last_in_use) {
2086 // We passed the page containing the allocation top. All subsequent
2087 // pages are not used.
2088 in_use = false;
2089 }
2090 }
2091
2092 if (page_list_is_chunk_ordered_) return;
2093
2094 Page* new_last_in_use = Page::FromAddress(NULL);
2095 MemoryAllocator::RelinkPageListInChunkOrder(this,
2096 &first_page_,
2097 &last_page_,
2098 &new_last_in_use);
2099 ASSERT(new_last_in_use->is_valid());
2100
2101 if (new_last_in_use != last_in_use) {
2102 // The current allocation top points to a page which is now in the middle
2103 // of the page list. We should move the allocation top forward to the new
2104 // last used page so various object iterators continue to work properly.
2105 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2106 last_in_use->AllocationTop());
2107
2108 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2109 if (size_in_bytes > 0) {
2110 Address start = last_in_use->AllocationTop();
2111 if (deallocate_blocks) {
2112 accounting_stats_.AllocateBytes(size_in_bytes);
2113 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2114 } else {
2115 Heap::CreateFillerObjectAt(start, size_in_bytes);
2116 }
2117 }
2118
2119 // The new last-in-use page was in the middle of the list before
2120 // sorting, so it is full.
2121 SetTop(new_last_in_use->AllocationTop());
2122
2123 ASSERT(AllocationTopPage() == new_last_in_use);
2124 ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2125 }
2126
2127 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2128 while (pages_in_use_iterator.has_next()) {
2129 Page* p = pages_in_use_iterator.next();
2130 if (!p->WasInUseBeforeMC()) {
2131 // An empty page is in the middle of a sequence of used pages.
2132 // Allocate it as a whole and deallocate it immediately.
2133 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2134 p->ObjectAreaStart());
2135
2136 p->SetAllocationWatermark(p->ObjectAreaStart());
2137 Address start = p->ObjectAreaStart();
2138 if (deallocate_blocks) {
2139 accounting_stats_.AllocateBytes(size_in_bytes);
2140 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2141 } else {
2142 Heap::CreateFillerObjectAt(start, size_in_bytes);
2143 }
2144 }
2145 }
2146
2147 page_list_is_chunk_ordered_ = true;
2148}
2149
2150
Steve Block6ded16b2010-05-10 14:33:55 +01002151void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2152 if (will_compact) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002153 RelinkPageListInChunkOrder(false);
Steve Block6ded16b2010-05-10 14:33:55 +01002154 }
2155}
2156
2157
Leon Clarkee46be812010-01-19 14:06:41 +00002158bool PagedSpace::ReserveSpace(int bytes) {
2159 Address limit = allocation_info_.limit;
2160 Address top = allocation_info_.top;
2161 if (limit - top >= bytes) return true;
2162
2163 // There wasn't enough space in the current page. Let's put the rest
2164 // of the page on the free list and start a fresh page.
2165 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2166
2167 Page* reserved_page = TopPageOf(allocation_info_);
2168 int bytes_left_to_reserve = bytes;
2169 while (bytes_left_to_reserve > 0) {
2170 if (!reserved_page->next_page()->is_valid()) {
2171 if (Heap::OldGenerationAllocationLimitReached()) return false;
2172 Expand(reserved_page);
2173 }
2174 bytes_left_to_reserve -= Page::kPageSize;
2175 reserved_page = reserved_page->next_page();
2176 if (!reserved_page->is_valid()) return false;
2177 }
2178 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002179 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
Leon Clarkee46be812010-01-19 14:06:41 +00002180 SetAllocationInfo(&allocation_info_,
2181 TopPageOf(allocation_info_)->next_page());
2182 return true;
2183}
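
// Rough illustration of the page walk in PagedSpace::ReserveSpace() above:
// the loop consumes the request Page::kPageSize bytes at a time, so at most
// ceil(bytes / Page::kPageSize) pages are visited (and expanded when they do
// not exist yet). The helper is hypothetical and not used by the real code.
static int ExampleMaxPagesVisited(int bytes) {
  return (bytes + Page::kPageSize - 1) / Page::kPageSize;
}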
2184
2185
2186// You have to call this last, since the implementation from PagedSpace
2187// doesn't know that memory was 'promised' to large object space.
2188bool LargeObjectSpace::ReserveSpace(int bytes) {
2189 return Heap::OldGenerationSpaceAvailable() >= bytes;
2190}
2191
2192
Steve Blocka7e24c12009-10-30 11:49:00 +00002193// Slow case for normal allocation. Try in order: (1) allocate in the next
2194// page in the space, (2) allocate off the space's free list, (3) expand the
2195// space, (4) fail.
2196HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2197 // Linear allocation in this space has failed. If there is another page
2198 // in the space, move to that page and allocate there. This allocation
2199 // should succeed (size_in_bytes should not be greater than a page's
2200 // object area size).
2201 Page* current_page = TopPageOf(allocation_info_);
2202 if (current_page->next_page()->is_valid()) {
2203 return AllocateInNextPage(current_page, size_in_bytes);
2204 }
2205
Steve Blockd0582a62009-12-15 09:54:21 +00002206 // There is no next page in this space. Try free list allocation unless that
2207 // is currently forbidden.
2208 if (!Heap::linear_allocation()) {
2209 int wasted_bytes;
John Reck59135872010-11-02 12:39:01 -07002210 Object* result;
2211 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
Steve Blockd0582a62009-12-15 09:54:21 +00002212 accounting_stats_.WasteBytes(wasted_bytes);
John Reck59135872010-11-02 12:39:01 -07002213 if (maybe->ToObject(&result)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002214 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002215
2216 HeapObject* obj = HeapObject::cast(result);
2217 Page* p = Page::FromAddress(obj->address());
2218
2219 if (obj->address() >= p->AllocationWatermark()) {
2220 // There should be no hole between the allocation watermark
2221 // and allocated object address.
2222 // Memory above the allocation watermark was not swept and
2223 // might contain garbage pointers to new space.
2224 ASSERT(obj->address() == p->AllocationWatermark());
2225 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2226 }
2227
2228 return obj;
Steve Blockd0582a62009-12-15 09:54:21 +00002229 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002230 }
2231
2232 // Free list allocation failed and there is no next page. Fail if we have
2233 // hit the old generation size limit that should cause a garbage
2234 // collection.
2235 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2236 return NULL;
2237 }
2238
2239 // Try to expand the space and allocate in the new next page.
2240 ASSERT(!current_page->next_page()->is_valid());
2241 if (Expand(current_page)) {
2242 return AllocateInNextPage(current_page, size_in_bytes);
2243 }
2244
2245 // Finally, fail.
2246 return NULL;
2247}
2248
2249
Leon Clarkee46be812010-01-19 14:06:41 +00002250void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002251 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blockd0582a62009-12-15 09:54:21 +00002252 int free_size =
2253 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002254 if (free_size > 0) {
2255 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2256 accounting_stats_.WasteBytes(wasted_bytes);
2257 }
Leon Clarkee46be812010-01-19 14:06:41 +00002258}
2259
2260
2261void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002262 current_page->SetAllocationWatermark(allocation_info_.top);
Leon Clarkee46be812010-01-19 14:06:41 +00002263 int free_size =
2264 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2265 // In the fixed space free list all the free list items have the right size.
2266 // We use up the rest of the page while preserving this invariant.
2267 while (free_size >= object_size_in_bytes_) {
2268 free_list_.Free(allocation_info_.top);
2269 allocation_info_.top += object_size_in_bytes_;
2270 free_size -= object_size_in_bytes_;
2271 accounting_stats_.WasteBytes(object_size_in_bytes_);
2272 }
2273}
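
// Illustration only: the chopping loop above reduced to plain arithmetic.
// Given the free tail of a page and the fixed object size, it reports how
// many whole free-list entries fit and how many bytes do not fit (and thus
// stay unused). The helper name and out-parameter are hypothetical.
static int ExampleWholeChunks(int free_bytes, int object_size, int* leftover) {
  int chunks = free_bytes / object_size;
  *leftover = free_bytes - chunks * object_size;
  return chunks;
}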
2274
2275
2276// Add the block at the top of the page to the space's free list, set the
2277// allocation info to the next page (which is assumed to exist), and allocate
2278// linearly there.
2279HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2280 int size_in_bytes) {
2281 ASSERT(current_page->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002282 Page* next_page = current_page->next_page();
2283 next_page->ClearGCFields();
Leon Clarkee46be812010-01-19 14:06:41 +00002284 PutRestOfCurrentPageOnFreeList(current_page);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002285 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002286 return AllocateLinearly(&allocation_info_, size_in_bytes);
2287}
2288
2289
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002290void OldSpace::DeallocateBlock(Address start,
2291 int size_in_bytes,
2292 bool add_to_freelist) {
2293 Free(start, size_in_bytes, add_to_freelist);
2294}
2295
2296
Steve Blocka7e24c12009-10-30 11:49:00 +00002297#ifdef DEBUG
2298struct CommentStatistic {
2299 const char* comment;
2300 int size;
2301 int count;
2302 void Clear() {
2303 comment = NULL;
2304 size = 0;
2305 count = 0;
2306 }
2307};
2308
2309
2310// Must be small, since a linear search is used for lookup.
2311const int kMaxComments = 64;
2312static CommentStatistic comments_statistics[kMaxComments+1];
2313
2314
2315void PagedSpace::ReportCodeStatistics() {
2316 ReportCodeKindStatistics();
2317 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2318 "count (average)\"):\n");
2319 for (int i = 0; i <= kMaxComments; i++) {
2320 const CommentStatistic& cs = comments_statistics[i];
2321 if (cs.size > 0) {
2322 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2323 cs.size/cs.count);
2324 }
2325 }
2326 PrintF("\n");
2327}
2328
2329
2330void PagedSpace::ResetCodeStatistics() {
2331 ClearCodeKindStatistics();
2332 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
2333 comments_statistics[kMaxComments].comment = "Unknown";
2334 comments_statistics[kMaxComments].size = 0;
2335 comments_statistics[kMaxComments].count = 0;
2336}
2337
2338
2339// Adds a comment to the 'comments_statistics' table. Performance is OK as
2340// long as 'kMaxComments' is small.
2341static void EnterComment(const char* comment, int delta) {
2342 // Do not count empty comments
2343 if (delta <= 0) return;
2344 CommentStatistic* cs = &comments_statistics[kMaxComments];
2345 // Search for a free or matching entry in 'comments_statistics': 'cs'
2346 // points to result.
2347 for (int i = 0; i < kMaxComments; i++) {
2348 if (comments_statistics[i].comment == NULL) {
2349 cs = &comments_statistics[i];
2350 cs->comment = comment;
2351 break;
2352 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2353 cs = &comments_statistics[i];
2354 break;
2355 }
2356 }
2357 // Update entry for 'comment'
2358 cs->size += delta;
2359 cs->count += 1;
2360}
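
// Illustration only: the lookup policy of EnterComment() above, shown on a
// plain string table -- an existing matching entry wins, otherwise the first
// empty slot is claimed, otherwise the shared overflow bucket (the slot at
// index 'capacity') is used. The helper and its parameters are hypothetical.
static int ExampleFindCommentSlot(const char** table,
                                  int capacity,  // excludes the overflow slot
                                  const char* comment) {
  for (int i = 0; i < capacity; i++) {
    if (table[i] == NULL) {
      table[i] = comment;  // Claim a free slot for this comment.
      return i;
    }
    if (strcmp(table[i], comment) == 0) return i;  // Existing entry.
  }
  return capacity;  // Overflow bucket shared by all remaining comments.
}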
2361
2362
2363// Call for each nested comment start (start marked with '[ xxx', end marked
2364// with ']'). RelocIterator 'it' must point to a comment reloc info.
2365static void CollectCommentStatistics(RelocIterator* it) {
2366 ASSERT(!it->done());
2367 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2368 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2369 if (tmp[0] != '[') {
2370 // Not a nested comment; skip
2371 return;
2372 }
2373
2374 // Search for end of nested comment or a new nested comment
2375 const char* const comment_txt =
2376 reinterpret_cast<const char*>(it->rinfo()->data());
2377 const byte* prev_pc = it->rinfo()->pc();
2378 int flat_delta = 0;
2379 it->next();
2380 while (true) {
2381 // All nested comments must be terminated properly, and therefore we will
2382 // exit from the loop.
2383 ASSERT(!it->done());
2384 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2385 const char* const txt =
2386 reinterpret_cast<const char*>(it->rinfo()->data());
Steve Blockd0582a62009-12-15 09:54:21 +00002387 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002388 if (txt[0] == ']') break; // End of nested comment
2389 // A new comment
2390 CollectCommentStatistics(it);
2391 // Skip code that was covered with previous comment
2392 prev_pc = it->rinfo()->pc();
2393 }
2394 it->next();
2395 }
2396 EnterComment(comment_txt, flat_delta);
2397}
2398
2399
2400// Collects code size statistics:
2401// - by code kind
2402// - by code comment
2403void PagedSpace::CollectCodeStatistics() {
2404 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002405 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002406 if (obj->IsCode()) {
2407 Code* code = Code::cast(obj);
2408 code_kind_statistics[code->kind()] += code->Size();
2409 RelocIterator it(code);
2410 int delta = 0;
2411 const byte* prev_pc = code->instruction_start();
2412 while (!it.done()) {
2413 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
Steve Blockd0582a62009-12-15 09:54:21 +00002414 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002415 CollectCommentStatistics(&it);
2416 prev_pc = it.rinfo()->pc();
2417 }
2418 it.next();
2419 }
2420
2421 ASSERT(code->instruction_start() <= prev_pc &&
Leon Clarkeac952652010-07-15 11:15:24 +01002422 prev_pc <= code->instruction_end());
2423 delta += static_cast<int>(code->instruction_end() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002424 EnterComment("NoComment", delta);
2425 }
2426 }
2427}
2428
2429
2430void OldSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002431 int pct = static_cast<int>(Available() * 100 / Capacity());
2432 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2433 ", waste: %" V8_PTR_PREFIX "d"
2434 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002435 Capacity(), Waste(), Available(), pct);
2436
Steve Blocka7e24c12009-10-30 11:49:00 +00002437 ClearHistograms();
2438 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002439 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2440 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002441 ReportHistogram(true);
2442}
Steve Blocka7e24c12009-10-30 11:49:00 +00002443#endif
2444
2445// -----------------------------------------------------------------------------
2446// FixedSpace implementation
2447
2448void FixedSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01002449 // Call prepare of the super class.
2450 PagedSpace::PrepareForMarkCompact(will_compact);
2451
Steve Blocka7e24c12009-10-30 11:49:00 +00002452 if (will_compact) {
2453 // Reset relocation info.
2454 MCResetRelocationInfo();
2455
2456 // During a compacting collection, everything in the space is considered
2457 // 'available' (set by the call to MCResetRelocationInfo) and we will
2458 // rediscover live and wasted bytes during the collection.
2459 ASSERT(Available() == Capacity());
2460 } else {
2461 // During a non-compacting collection, everything below the linear
2462 // allocation pointer except wasted top-of-page blocks is considered
2463 // allocated and we will rediscover available bytes during the
2464 // collection.
2465 accounting_stats_.AllocateBytes(free_list_.available());
2466 }
2467
2468 // Clear the free list before a full GC---it will be rebuilt afterward.
2469 free_list_.Reset();
2470}
2471
2472
2473void FixedSpace::MCCommitRelocationInfo() {
2474 // Update fast allocation info.
2475 allocation_info_.top = mc_forwarding_info_.top;
2476 allocation_info_.limit = mc_forwarding_info_.limit;
2477 ASSERT(allocation_info_.VerifyPagedAllocation());
2478
2479 // The space is compacted and we haven't yet wasted any space.
2480 ASSERT(Waste() == 0);
2481
2482 // Update allocation_top of each page in use and compute waste.
2483 int computed_size = 0;
2484 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2485 while (it.has_next()) {
2486 Page* page = it.next();
2487 Address page_top = page->AllocationTop();
Steve Blockd0582a62009-12-15 09:54:21 +00002488 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00002489 if (it.has_next()) {
Steve Blockd0582a62009-12-15 09:54:21 +00002490 accounting_stats_.WasteBytes(
2491 static_cast<int>(page->ObjectAreaEnd() - page_top));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002492 page->SetAllocationWatermark(page_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002493 }
2494 }
2495
2496 // Make sure the computed size - based on the used portion of the
2497 // pages in use - matches the size we adjust during allocation.
2498 ASSERT(computed_size == Size());
2499}
2500
2501
2502// Slow case for normal allocation. Try in order: (1) allocate in the next
2503// page in the space, (2) allocate off the space's free list, (3) expand the
2504// space, (4) fail.
2505HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2506 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2507 // Linear allocation in this space has failed. If there is another page
2508 // in the space, move to that page and allocate there. This allocation
2509 // should succeed.
2510 Page* current_page = TopPageOf(allocation_info_);
2511 if (current_page->next_page()->is_valid()) {
2512 return AllocateInNextPage(current_page, size_in_bytes);
2513 }
2514
Steve Blockd0582a62009-12-15 09:54:21 +00002515 // There is no next page in this space. Try free list allocation unless
2516 // that is currently forbidden. The fixed space free list implicitly assumes
2517 // that all free blocks are of the fixed size.
2518 if (!Heap::linear_allocation()) {
John Reck59135872010-11-02 12:39:01 -07002519 Object* result;
2520 MaybeObject* maybe = free_list_.Allocate();
2521 if (maybe->ToObject(&result)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002522 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002523 HeapObject* obj = HeapObject::cast(result);
2524 Page* p = Page::FromAddress(obj->address());
2525
2526 if (obj->address() >= p->AllocationWatermark()) {
2527 // There should be no hole between the allocation watermark
2528 // and allocated object address.
2529 // Memory above the allocation watermark was not swept and
2530 // might contain garbage pointers to new space.
2531 ASSERT(obj->address() == p->AllocationWatermark());
2532 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2533 }
2534
2535 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00002536 }
2537 }
2538
2539 // Free list allocation failed and there is no next page. Fail if we have
2540 // hit the old generation size limit that should cause a garbage
2541 // collection.
2542 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2543 return NULL;
2544 }
2545
2546 // Try to expand the space and allocate in the new next page.
2547 ASSERT(!current_page->next_page()->is_valid());
2548 if (Expand(current_page)) {
2549 return AllocateInNextPage(current_page, size_in_bytes);
2550 }
2551
2552 // Finally, fail.
2553 return NULL;
2554}
2555
2556
2557// Move to the next page (there is assumed to be one) and allocate there.
2558// The top-of-page block is always wasted, because it is too small to hold a
2559// map.
2560HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2561 int size_in_bytes) {
2562 ASSERT(current_page->next_page()->is_valid());
Steve Block6ded16b2010-05-10 14:33:55 +01002563 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
Steve Blocka7e24c12009-10-30 11:49:00 +00002564 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002565 Page* next_page = current_page->next_page();
2566 next_page->ClearGCFields();
2567 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002568 accounting_stats_.WasteBytes(page_extra_);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002569 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002570 return AllocateLinearly(&allocation_info_, size_in_bytes);
2571}
2572
2573
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002574void FixedSpace::DeallocateBlock(Address start,
2575 int size_in_bytes,
2576 bool add_to_freelist) {
2577 // Free-list elements in fixed space are assumed to have a fixed size.
2578 // We break the free block into chunks and add them to the free list
2579 // individually.
2580 int size = object_size_in_bytes();
2581 ASSERT(size_in_bytes % size == 0);
2582 Address end = start + size_in_bytes;
2583 for (Address a = start; a < end; a += size) {
2584 Free(a, add_to_freelist);
2585 }
2586}
2587
2588
Steve Blocka7e24c12009-10-30 11:49:00 +00002589#ifdef DEBUG
2590void FixedSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002591 int pct = static_cast<int>(Available() * 100 / Capacity());
2592 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2593 ", waste: %" V8_PTR_PREFIX "d"
2594 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002595 Capacity(), Waste(), Available(), pct);
2596
Steve Blocka7e24c12009-10-30 11:49:00 +00002597 ClearHistograms();
2598 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002599 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2600 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002601 ReportHistogram(false);
2602}
Steve Blocka7e24c12009-10-30 11:49:00 +00002603#endif
2604
2605
2606// -----------------------------------------------------------------------------
2607// MapSpace implementation
2608
2609void MapSpace::PrepareForMarkCompact(bool will_compact) {
2610 // Call prepare of the super class.
2611 FixedSpace::PrepareForMarkCompact(will_compact);
2612
2613 if (will_compact) {
2614 // Initialize map index entry.
2615 int page_count = 0;
2616 PageIterator it(this, PageIterator::ALL_PAGES);
2617 while (it.has_next()) {
2618 ASSERT_MAP_PAGE_INDEX(page_count);
2619
2620 Page* p = it.next();
2621 ASSERT(p->mc_page_index == page_count);
2622
2623 page_addresses_[page_count++] = p->address();
2624 }
2625 }
2626}
2627
2628
2629#ifdef DEBUG
2630void MapSpace::VerifyObject(HeapObject* object) {
2631 // The object should be a map or a free-list node.
2632 ASSERT(object->IsMap() || object->IsByteArray());
2633}
2634#endif
2635
2636
2637// -----------------------------------------------------------------------------
2638// GlobalPropertyCellSpace implementation
2639
2640#ifdef DEBUG
2641void CellSpace::VerifyObject(HeapObject* object) {
2642 // The object should be a global object property cell or a free-list node.
2643 ASSERT(object->IsJSGlobalPropertyCell() ||
2644 object->map() == Heap::two_pointer_filler_map());
2645}
2646#endif
2647
2648
2649// -----------------------------------------------------------------------------
2650// LargeObjectIterator
2651
2652LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2653 current_ = space->first_chunk_;
2654 size_func_ = NULL;
2655}
2656
2657
2658LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2659 HeapObjectCallback size_func) {
2660 current_ = space->first_chunk_;
2661 size_func_ = size_func;
2662}
2663
2664
2665HeapObject* LargeObjectIterator::next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002666 if (current_ == NULL) return NULL;
2667
Steve Blocka7e24c12009-10-30 11:49:00 +00002668 HeapObject* object = current_->GetObject();
2669 current_ = current_->next();
2670 return object;
2671}
2672
2673
2674// -----------------------------------------------------------------------------
2675// LargeObjectChunk
2676
2677LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2678 size_t* chunk_size,
2679 Executability executable) {
2680 size_t requested = ChunkSizeFor(size_in_bytes);
2681 void* mem = MemoryAllocator::AllocateRawMemory(requested,
2682 chunk_size,
2683 executable);
2684 if (mem == NULL) return NULL;
2685 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
2686 if (*chunk_size < requested) {
Steve Block791712a2010-08-27 10:21:07 +01002687 MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002688 LOG(DeleteEvent("LargeObjectChunk", mem));
2689 return NULL;
2690 }
Iain Merrick9ac36c92010-09-13 15:29:50 +01002691 ObjectSpace space =
2692 (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
2693 MemoryAllocator::PerformAllocationCallback(space,
2694 kAllocationActionAllocate,
2695 *chunk_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002696 return reinterpret_cast<LargeObjectChunk*>(mem);
2697}
2698
2699
2700int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
Steve Blockd0582a62009-12-15 09:54:21 +00002701 int os_alignment = static_cast<int>(OS::AllocateAlignment());
Steve Blocka7e24c12009-10-30 11:49:00 +00002702 if (os_alignment < Page::kPageSize)
2703 size_in_bytes += (Page::kPageSize - os_alignment);
2704 return size_in_bytes + Page::kObjectStartOffset;
2705}
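
// Worked example for ChunkSizeFor() (all constants here are assumed values
// for illustration, not necessarily this build's): with
// OS::AllocateAlignment() == 4096, Page::kPageSize == 8192 and
// Page::kObjectStartOffset == 256, a request of 100000 bytes becomes
//   100000 + (8192 - 4096) + 256 = 104352 bytes,
// which is the size AllocateRawMemory() is then asked for.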
2706
2707// -----------------------------------------------------------------------------
2708// LargeObjectSpace
2709
2710LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
2711 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2712 first_chunk_(NULL),
2713 size_(0),
2714 page_count_(0) {}
2715
2716
2717bool LargeObjectSpace::Setup() {
2718 first_chunk_ = NULL;
2719 size_ = 0;
2720 page_count_ = 0;
2721 return true;
2722}
2723
2724
2725void LargeObjectSpace::TearDown() {
2726 while (first_chunk_ != NULL) {
2727 LargeObjectChunk* chunk = first_chunk_;
2728 first_chunk_ = first_chunk_->next();
2729 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
Steve Block791712a2010-08-27 10:21:07 +01002730 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2731 Executability executable =
2732 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Iain Merrick9ac36c92010-09-13 15:29:50 +01002733 ObjectSpace space = kObjectSpaceLoSpace;
2734 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2735 size_t size = chunk->size();
2736 MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
2737 MemoryAllocator::PerformAllocationCallback(
2738 space, kAllocationActionFree, size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002739 }
2740
2741 size_ = 0;
2742 page_count_ = 0;
2743}
2744
2745
2746#ifdef ENABLE_HEAP_PROTECTION
2747
2748void LargeObjectSpace::Protect() {
2749 LargeObjectChunk* chunk = first_chunk_;
2750 while (chunk != NULL) {
2751 MemoryAllocator::Protect(chunk->address(), chunk->size());
2752 chunk = chunk->next();
2753 }
2754}
2755
2756
2757void LargeObjectSpace::Unprotect() {
2758 LargeObjectChunk* chunk = first_chunk_;
2759 while (chunk != NULL) {
2760 bool is_code = chunk->GetObject()->IsCode();
2761 MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
2762 is_code ? EXECUTABLE : NOT_EXECUTABLE);
2763 chunk = chunk->next();
2764 }
2765}
2766
2767#endif
2768
2769
John Reck59135872010-11-02 12:39:01 -07002770MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
2771 int object_size,
2772 Executability executable) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002773 ASSERT(0 < object_size && object_size <= requested_size);
2774
2775 // Check if we want to force a GC before growing the old space further.
2776 // If so, fail the allocation.
2777 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002778 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002779 }
2780
2781 size_t chunk_size;
2782 LargeObjectChunk* chunk =
2783 LargeObjectChunk::New(requested_size, &chunk_size, executable);
2784 if (chunk == NULL) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002785 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002786 }
2787
Steve Blockd0582a62009-12-15 09:54:21 +00002788 size_ += static_cast<int>(chunk_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002789 page_count_++;
2790 chunk->set_next(first_chunk_);
2791 chunk->set_size(chunk_size);
2792 first_chunk_ = chunk;
2793
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002794 // Initialize page header.
Steve Blocka7e24c12009-10-30 11:49:00 +00002795 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2796 Address object_address = page->ObjectAreaStart();
2797 // Clear the low order bit of the second word in the page to flag it as a
2798 // large object page. If the chunk_size happened to be written there, its
2799 // low order bit should already be clear.
2800 ASSERT((chunk_size & 0x1) == 0);
Steve Block6ded16b2010-05-10 14:33:55 +01002801 page->SetIsLargeObjectPage(true);
Steve Block791712a2010-08-27 10:21:07 +01002802 page->SetIsPageExecutable(executable);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002803 page->SetRegionMarks(Page::kAllRegionsCleanMarks);
Steve Blocka7e24c12009-10-30 11:49:00 +00002804 return HeapObject::FromAddress(object_address);
2805}
2806
2807
John Reck59135872010-11-02 12:39:01 -07002808MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002809 ASSERT(0 < size_in_bytes);
2810 return AllocateRawInternal(size_in_bytes,
2811 size_in_bytes,
2812 EXECUTABLE);
2813}
2814
2815
John Reck59135872010-11-02 12:39:01 -07002816MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002817 ASSERT(0 < size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002818 return AllocateRawInternal(size_in_bytes,
Steve Blocka7e24c12009-10-30 11:49:00 +00002819 size_in_bytes,
2820 NOT_EXECUTABLE);
2821}
2822
2823
John Reck59135872010-11-02 12:39:01 -07002824MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002825 ASSERT(0 < size_in_bytes);
2826 return AllocateRawInternal(size_in_bytes,
2827 size_in_bytes,
2828 NOT_EXECUTABLE);
2829}
2830
2831
2832// GC support
John Reck59135872010-11-02 12:39:01 -07002833MaybeObject* LargeObjectSpace::FindObject(Address a) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002834 for (LargeObjectChunk* chunk = first_chunk_;
2835 chunk != NULL;
2836 chunk = chunk->next()) {
2837 Address chunk_address = chunk->address();
2838 if (chunk_address <= a && a < chunk_address + chunk->size()) {
2839 return chunk->GetObject();
2840 }
2841 }
2842 return Failure::Exception();
2843}
2844
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002845
2846LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
2847 // TODO(853): Change this implementation to only find executable
2848 // chunks and use some kind of hash-based approach to speed it up.
2849 for (LargeObjectChunk* chunk = first_chunk_;
2850 chunk != NULL;
2851 chunk = chunk->next()) {
2852 Address chunk_address = chunk->address();
2853 if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
2854 return chunk;
2855 }
2856 }
2857 return NULL;
2858}
2859
2860
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002861void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002862 LargeObjectIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002863 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002864 // We only have code, sequential strings, or fixed arrays in large
2865 // object space, and only fixed arrays can possibly contain pointers to
2866 // the young generation.
Steve Blocka7e24c12009-10-30 11:49:00 +00002867 if (object->IsFixedArray()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002868 Page* page = Page::FromAddress(object->address());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002869 uint32_t marks = page->GetRegionMarks();
2870 uint32_t newmarks = Page::kAllRegionsCleanMarks;
Steve Blocka7e24c12009-10-30 11:49:00 +00002871
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002872 if (marks != Page::kAllRegionsCleanMarks) {
2873 // For a large page a single dirty mark corresponds to several
2874 // regions (modulo 32). So we treat a large page as a sequence of
2875 // normal pages of size Page::kPageSize having same dirty marks
2876 // and subsequently iterate dirty regions on each of these pages.
2877 Address start = object->address();
2878 Address end = page->ObjectAreaEnd();
2879 Address object_end = start + object->Size();
2880
2881 // Iterate regions of the first normal page covering object.
2882 uint32_t first_region_number = page->GetRegionNumberForAddress(start);
2883 newmarks |=
2884 Heap::IterateDirtyRegions(marks >> first_region_number,
2885 start,
2886 end,
2887 &Heap::IteratePointersInDirtyRegion,
2888 copy_object) << first_region_number;
2889
2890 start = end;
2891 end = start + Page::kPageSize;
2892 while (end <= object_end) {
2893 // Iterate next 32 regions.
2894 newmarks |=
2895 Heap::IterateDirtyRegions(marks,
2896 start,
2897 end,
2898 &Heap::IteratePointersInDirtyRegion,
2899 copy_object);
2900 start = end;
2901 end = start + Page::kPageSize;
2902 }
2903
2904 if (start != object_end) {
2905 // Iterate the last piece of an object which is less than
2906 // Page::kPageSize.
2907 newmarks |=
2908 Heap::IterateDirtyRegions(marks,
2909 start,
2910 object_end,
2911 &Heap::IteratePointersInDirtyRegion,
2912 copy_object);
2913 }
2914
2915 page->SetRegionMarks(newmarks);
Steve Blocka7e24c12009-10-30 11:49:00 +00002916 }
2917 }
2918 }
2919}
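
// Illustration only: how an address inside one page-sized window of a large
// object maps onto one of the 32 region dirty bits that the code above shifts
// and recombines. Assumes 32 regions per Page::kPageSize window (which is how
// the 32-bit region mark word is defined); the helper is hypothetical.
static uint32_t ExampleRegionBitFor(Address window_start, Address addr) {
  int offset = static_cast<int>(addr - window_start);
  int region_number = offset / (Page::kPageSize / 32);
  ASSERT(region_number >= 0 && region_number < 32);
  return static_cast<uint32_t>(1) << region_number;
}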
2920
2921
2922void LargeObjectSpace::FreeUnmarkedObjects() {
2923 LargeObjectChunk* previous = NULL;
2924 LargeObjectChunk* current = first_chunk_;
2925 while (current != NULL) {
2926 HeapObject* object = current->GetObject();
2927 if (object->IsMarked()) {
2928 object->ClearMark();
2929 MarkCompactCollector::tracer()->decrement_marked_count();
2930 previous = current;
2931 current = current->next();
2932 } else {
Steve Block791712a2010-08-27 10:21:07 +01002933 Page* page = Page::FromAddress(RoundUp(current->address(),
2934 Page::kPageSize));
2935 Executability executable =
2936 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Steve Blocka7e24c12009-10-30 11:49:00 +00002937 Address chunk_address = current->address();
2938 size_t chunk_size = current->size();
2939
2940 // Cut the chunk out from the chunk list.
2941 current = current->next();
2942 if (previous == NULL) {
2943 first_chunk_ = current;
2944 } else {
2945 previous->set_next(current);
2946 }
2947
2948 // Free the chunk.
Leon Clarked91b9f72010-01-27 17:25:45 +00002949 MarkCompactCollector::ReportDeleteIfNeeded(object);
Steve Blockd0582a62009-12-15 09:54:21 +00002950 size_ -= static_cast<int>(chunk_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002951 page_count_--;
Iain Merrick9ac36c92010-09-13 15:29:50 +01002952 ObjectSpace space = kObjectSpaceLoSpace;
2953 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
Steve Block791712a2010-08-27 10:21:07 +01002954 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
Iain Merrick9ac36c92010-09-13 15:29:50 +01002955 MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
2956                                                 chunk_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002957 LOG(DeleteEvent("LargeObjectChunk", chunk_address));
2958 }
2959 }
2960}
2961
2962
2963bool LargeObjectSpace::Contains(HeapObject* object) {
2964 Address address = object->address();
Steve Block6ded16b2010-05-10 14:33:55 +01002965 if (Heap::new_space()->Contains(address)) {
2966 return false;
2967 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002968 Page* page = Page::FromAddress(address);
2969
2970 SLOW_ASSERT(!page->IsLargeObjectPage()
2971 || !FindObject(address)->IsFailure());
2972
2973 return page->IsLargeObjectPage();
2974}
2975
2976
2977#ifdef DEBUG
2978// We do not assume that the large object iterator works, because it depends
2979// on the invariants we are checking during verification.
2980void LargeObjectSpace::Verify() {
2981 for (LargeObjectChunk* chunk = first_chunk_;
2982 chunk != NULL;
2983 chunk = chunk->next()) {
2984 // Each chunk contains an object that starts at the large object page's
2985 // object area start.
2986 HeapObject* object = chunk->GetObject();
2987 Page* page = Page::FromAddress(object->address());
2988 ASSERT(object->address() == page->ObjectAreaStart());
2989
2990 // The first word should be a map, and we expect all map pointers to be
2991 // in map space.
2992 Map* map = object->map();
2993 ASSERT(map->IsMap());
2994 ASSERT(Heap::map_space()->Contains(map));
2995
2996 // We have only code, sequential strings, external strings
2997 // (sequential strings that have been morphed into external
2998 // strings), fixed arrays, and byte arrays in large object space.
2999 ASSERT(object->IsCode() || object->IsSeqString() ||
3000 object->IsExternalString() || object->IsFixedArray() ||
3001 object->IsByteArray());
3002
3003 // The object itself should look OK.
3004 object->Verify();
3005
3006 // Byte arrays and strings don't have interior pointers.
3007 if (object->IsCode()) {
3008 VerifyPointersVisitor code_visitor;
3009 object->IterateBody(map->instance_type(),
3010 object->Size(),
3011 &code_visitor);
3012 } else if (object->IsFixedArray()) {
3013 // We loop over fixed arrays ourselves, rather than using the visitor,
3014 // because the visitor doesn't support the start/offset iteration
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003015 // needed for IsRegionDirty.
Steve Blocka7e24c12009-10-30 11:49:00 +00003016 FixedArray* array = FixedArray::cast(object);
3017 for (int j = 0; j < array->length(); j++) {
3018 Object* element = array->get(j);
3019 if (element->IsHeapObject()) {
3020 HeapObject* element_object = HeapObject::cast(element);
3021 ASSERT(Heap::Contains(element_object));
3022 ASSERT(element_object->map()->IsMap());
3023 if (Heap::InNewSpace(element_object)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003024 Address array_addr = object->address();
3025 Address element_addr = array_addr + FixedArray::kHeaderSize +
3026 j * kPointerSize;
3027
3028 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00003029 }
3030 }
3031 }
3032 }
3033 }
3034}
3035
3036
3037void LargeObjectSpace::Print() {
3038 LargeObjectIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00003039 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
3040 obj->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003041 }
3042}
3043
3044
3045void LargeObjectSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01003046 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003047 int num_objects = 0;
3048 ClearHistograms();
3049 LargeObjectIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00003050 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003051 num_objects++;
Leon Clarked91b9f72010-01-27 17:25:45 +00003052 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00003053 }
3054
3055 PrintF(" number of objects %d\n", num_objects);
3056 if (num_objects > 0) ReportHistogram(false);
3057}
3058
3059
3060void LargeObjectSpace::CollectCodeStatistics() {
3061 LargeObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00003062 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003063 if (obj->IsCode()) {
3064 Code* code = Code::cast(obj);
3065 code_kind_statistics[code->kind()] += code->Size();
3066 }
3067 }
3068}
Steve Blocka7e24c12009-10-30 11:49:00 +00003069#endif // DEBUG
3070
3071} } // namespace v8::internal