// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())

intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;

// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


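// Advance the iteration to the next page in the space and return the first
// object on that page, or NULL if the end of the iteration has been reached.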
HeapObject* HeapObjectIterator::FromNextPage() {
  if (cur_addr_ == end_addr_) return NULL;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  if (cur_addr_ == end_addr_) return NULL;
  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return FromCurrentPage();
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}


// -----------------------------------------------------------------------------
// CodeRange

List<CodeRange::FreeBlock> CodeRange::free_list_(0);
List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
int CodeRange::current_allocation_block_index_ = 0;
VirtualMemory* CodeRange::code_range_ = NULL;


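// Reserve a contiguous range of virtual addresses for generated code, so that
// the distance between any two addresses in the range fits in a signed 32-bit
// offset (see CompareFreeBlockAddress below).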
bool CodeRange::Setup(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(NewEvent("CodeRange", code_range_->address(), requested));
  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


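// Advance current_allocation_block_index_ to an allocation block with at
// least 'requested' bytes available.  If none is left, coalesce the blocks on
// the free list back into the allocation list and search again; if that also
// fails, the code range is exhausted and we report out of memory.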
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}


void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  *allocated = RoundUp(requested, Page::kPageSize);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (*allocated >= current.size - Page::kPageSize) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  }
  ASSERT(*allocated <= current.size);
  if (!code_range_->Commit(current.start, *allocated, true)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


void CodeRange::FreeRawMemory(void* address, size_t length) {
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//
intptr_t MemoryAllocator::capacity_ = 0;
intptr_t MemoryAllocator::size_ = 0;
intptr_t MemoryAllocator::size_executable_ = 0;

List<MemoryAllocator::MemoryAllocationCallbackRegistration>
    MemoryAllocator::memory_allocation_callbacks_;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


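// Chunk ids that are not currently backing a chunk are kept on a simple
// stack (free_chunk_ids_); top_ is the number of ids on the stack.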
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(intptr_t capacity) {
  capacity_ = RoundUp(capacity, Page::kPageSize);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ =
      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  size_executable_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
    return NULL;
  }
  void* mem;
  if (executable == EXECUTABLE && CodeRange::exists()) {
    mem = CodeRange::AllocateRawMemory(requested, allocated);
  } else {
    mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;

  if (executable == EXECUTABLE) size_executable_ += alloced;
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem,
                                    size_t length,
                                    Executability executable) {
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
  if (CodeRange::contains(static_cast<Address>(mem))) {
    CodeRange::FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);

  ASSERT(size_ >= 0);
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

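// Reserve (but do not commit) the initial chunk of virtual memory; parts of
// it are committed later through CommitPages and CommitBlock.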
void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size.  Page::kPageSize is a power of two so we can divide by
  // shifting.
  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}


Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee that the desired number of pages
  // can be allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size, owner->executable());
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


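// Commit a block of memory within the initial chunk, zap it in debug builds,
// and keep the memory_allocated counter in sync.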
bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  Counters::memory_allocated.Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  Counters::memory_allocated.Decrement(static_cast<int>(size));
  return true;
}


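// Fill a block with kZapValue so that stale pointers into freed or
// uncommitted memory are easy to recognize in debug builds.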
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->InvalidateWatermark(true);
    p->SetIsLargeObjectPage(false);
    p->SetAllocationWatermark(p->ObjectAreaStart());
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


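// Free, chunk by chunk, all pages from 'p' to the end of the page list.
// Pages that share a chunk with pages before 'p' cannot be freed; in that
// case 'p' is returned as the new end of the list, otherwise an invalid page
// is returned.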
Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'prev'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::FreeAllPages(PagedSpace* space) {
  for (int i = 0, length = chunks_.length(); i < length; i++) {
    if (chunks_[i].owner() == space) {
      DeleteChunk(i);
    }
  }
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
    size_t size = c.size();
    FreeRawMemory(c.address(), size, c.executable());
    PerformAllocationCallback(space, kAllocationActionFree, size);
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


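// Rebuild the page list of 'space' in chunk order, reporting the first and
// last pages of the relinked list and the last page that was in use before
// the current mark-compact collection.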
void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
                                                 Page** first_page,
                                                 Page** last_page,
                                                 Page** last_page_in_use) {
  Page* first = NULL;
  Page* last = NULL;

  for (int i = 0, length = chunks_.length(); i < length; i++) {
    ChunkInfo& chunk = chunks_[i];

    if (chunk.owner() == space) {
      if (first == NULL) {
        Address low = RoundUp(chunk.address(), Page::kPageSize);
        first = Page::FromAddress(low);
      }
      last = RelinkPagesInChunk(i,
                                chunk.address(),
                                chunk.size(),
                                last,
                                last_page_in_use);
    }
  }

  if (first_page != NULL) {
    *first_page = first;
  }

  if (last_page != NULL) {
    *last_page = last;
  }
}


Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    p->InvalidateWatermark(true);
    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages =
        Min(MemoryAllocator::kPagesPerChunk,
            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially clear region marks in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  MemoryAllocator::FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::MarkAllPagesClean() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
  }
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


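// Compute a space-relative offset for 'addr' used by the mark-compact
// collector: the page's mc_page_index times the page size plus the offset
// within the page.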
int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page in the page, MCSpaceOffsetForAddress considers it is in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to previous page
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
  current_page->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages =
      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear region marks of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  if (!page_list_is_chunk_ordered_) {
    // We can't shrink the space if the page list is not chunk-ordered
    // (see comment for class MemoryAllocator for definition).
    return;
  }

  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap, and
        // page regions covering intergenerational references should be
        // marked dirty.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


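// Swap the roles of the two semispaces; the allocation info is not updated
// here.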
void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
    return false;
  }
  committed_ = true;
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
    return false;
  }
  committed_ = false;
  return true;
}


// -----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  initial_capacity_ = initial_capacity;
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;
  committed_ = false;

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;

  return Commit();
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  int maximum_extra = maximum_capacity_ - capacity_;
  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
                  maximum_extra);
  if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
    return false;
  }
  capacity_ += extra;
  return true;
}


bool SemiSpace::GrowTo(int new_capacity) {
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  size_t delta = new_capacity - capacity_;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  size_t delta = capacity_ - new_capacity;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// A static array of histogram info for each type.
static HistogramInfo heap_histograms[LAST_TYPE+1];
static JSObject::SpillInformation js_spill_information;

// heap_histograms is shared, always clear it before using it.
static void ClearHistograms() {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  js_spill_information.Clear();
}


static int code_kind_statistics[Code::NUMBER_OF_KINDS];


static void ClearCodeKindStatistics() {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  const char* table[Code::NUMBER_OF_KINDS] = { NULL };

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
      CASE(KEYED_CALL_IC);
      CASE(BINARY_OP_IC);
    }
  }

#undef CASE

  PrintF("\n   Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF("     %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(heap_histograms[type].name() != NULL);
  heap_histograms[type].increment_number(1);
  heap_histograms[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
  }

  return obj->Size();
}


static void ReportHistogram(bool print_spill) {
  PrintF("\n  Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (heap_histograms[i].number() > 0) {
      PrintF("    %-34s%10d (%10d bytes)\n",
             heap_histograms[i].name(),
             heap_histograms[i].number(),
             heap_histograms[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)      \
    string_number += heap_histograms[type].number(); \
    string_bytes += heap_histograms[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    js_spill_information.Print();
  }
}
#endif  // DEBUG


// Support for statistics gathering for --heap-stats and --log-gc.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}

1607// Because the copying collector does not touch garbage objects, we iterate
1608// the new space before a collection to get a histogram of allocated objects.
1609// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1610// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1611// flag is set.
1612void NewSpace::CollectStatistics() {
1613 ClearHistograms();
1614 SemiSpaceIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00001615 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1616 RecordAllocation(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001617}
1618
1619
1620#ifdef ENABLE_LOGGING_AND_PROFILING
1621static void DoReportStatistics(HistogramInfo* info, const char* description) {
1622 LOG(HeapSampleBeginEvent("NewSpace", description));
1623 // Lump all the string types together.
1624 int string_number = 0;
1625 int string_bytes = 0;
1626#define INCREMENT(type, size, name, camel_name) \
1627 string_number += info[type].number(); \
1628 string_bytes += info[type].bytes();
1629 STRING_TYPE_LIST(INCREMENT)
1630#undef INCREMENT
1631 if (string_number > 0) {
1632 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1633 }
1634
1635 // Then do the other types.
1636 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1637 if (info[i].number() > 0) {
1638 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
1639 info[i].bytes()));
1640 }
1641 }
1642 LOG(HeapSampleEndEvent("NewSpace", description));
1643}
1644#endif // ENABLE_LOGGING_AND_PROFILING
1645
1646
1647void NewSpace::ReportStatistics() {
1648#ifdef DEBUG
1649 if (FLAG_heap_stats) {
1650 float pct = static_cast<float>(Available()) / Capacity();
Ben Murdochf87a2032010-10-22 12:50:53 +01001651 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1652 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001653 Capacity(), Available(), static_cast<int>(pct*100));
1654 PrintF("\n Object Histogram:\n");
1655 for (int i = 0; i <= LAST_TYPE; i++) {
1656 if (allocated_histogram_[i].number() > 0) {
Steve Block6ded16b2010-05-10 14:33:55 +01001657 PrintF(" %-34s%10d (%10d bytes)\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00001658 allocated_histogram_[i].name(),
1659 allocated_histogram_[i].number(),
1660 allocated_histogram_[i].bytes());
1661 }
1662 }
1663 PrintF("\n");
1664 }
1665#endif // DEBUG
1666
1667#ifdef ENABLE_LOGGING_AND_PROFILING
1668 if (FLAG_log_gc) {
1669 DoReportStatistics(allocated_histogram_, "allocated");
1670 DoReportStatistics(promoted_histogram_, "promoted");
1671 }
1672#endif // ENABLE_LOGGING_AND_PROFILING
1673}
1674
1675
1676void NewSpace::RecordAllocation(HeapObject* obj) {
1677 InstanceType type = obj->map()->instance_type();
1678 ASSERT(0 <= type && type <= LAST_TYPE);
1679 allocated_histogram_[type].increment_number(1);
1680 allocated_histogram_[type].increment_bytes(obj->Size());
1681}
1682
1683
1684void NewSpace::RecordPromotion(HeapObject* obj) {
1685 InstanceType type = obj->map()->instance_type();
1686 ASSERT(0 <= type && type <= LAST_TYPE);
1687 promoted_histogram_[type].increment_number(1);
1688 promoted_histogram_[type].increment_bytes(obj->Size());
1689}
1690#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1691
1692
1693// -----------------------------------------------------------------------------
1694// Free lists for old object spaces implementation
1695
1696void FreeListNode::set_size(int size_in_bytes) {
1697 ASSERT(size_in_bytes > 0);
1698 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1699
1700 // We write a map and possibly size information to the block. If the block
1701 // is big enough to be a ByteArray with at least one extra word (the next
1702 // pointer), we set its map to be the byte array map and its size to an
1703 // appropriate array length for the desired size from HeapObject::Size().
1704  // If the block is too small (e.g., one or two words) to hold both a size
1705 // field and a next pointer, we give it a filler map that gives it the
1706 // correct size.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001707 if (size_in_bytes > ByteArray::kHeaderSize) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001708 set_map(Heap::raw_unchecked_byte_array_map());
Steve Blockd0582a62009-12-15 09:54:21 +00001709 // Can't use ByteArray::cast because it fails during deserialization.
1710 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1711 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00001712 } else if (size_in_bytes == kPointerSize) {
1713 set_map(Heap::raw_unchecked_one_pointer_filler_map());
1714 } else if (size_in_bytes == 2 * kPointerSize) {
1715 set_map(Heap::raw_unchecked_two_pointer_filler_map());
1716 } else {
1717 UNREACHABLE();
1718 }
Steve Blockd0582a62009-12-15 09:54:21 +00001719 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1720 // deserialization because the byte array map is not done yet.
Steve Blocka7e24c12009-10-30 11:49:00 +00001721}
1722
1723
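// The location of the free block's next pointer depends on how set_size()
// tagged the block: blocks tagged with the byte array map store it at
// kNextOffset, while one- and two-pointer filler blocks reuse the word
// immediately after the map (offset kPointerSize).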
1724Address FreeListNode::next() {
Steve Block3ce2e202009-11-05 08:53:23 +00001725 ASSERT(IsFreeListNode(this));
Steve Blocka7e24c12009-10-30 11:49:00 +00001726 if (map() == Heap::raw_unchecked_byte_array_map()) {
1727 ASSERT(Size() >= kNextOffset + kPointerSize);
1728 return Memory::Address_at(address() + kNextOffset);
1729 } else {
1730 return Memory::Address_at(address() + kPointerSize);
1731 }
1732}
1733
1734
1735void FreeListNode::set_next(Address next) {
Steve Block3ce2e202009-11-05 08:53:23 +00001736 ASSERT(IsFreeListNode(this));
Steve Blocka7e24c12009-10-30 11:49:00 +00001737 if (map() == Heap::raw_unchecked_byte_array_map()) {
1738 ASSERT(Size() >= kNextOffset + kPointerSize);
1739 Memory::Address_at(address() + kNextOffset) = next;
1740 } else {
1741 Memory::Address_at(address() + kPointerSize) = next;
1742 }
1743}
1744
1745
1746OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
1747 Reset();
1748}
1749
1750
1751void OldSpaceFreeList::Reset() {
1752 available_ = 0;
1753 for (int i = 0; i < kFreeListsLength; i++) {
1754 free_[i].head_node_ = NULL;
1755 }
1756 needs_rebuild_ = false;
1757 finger_ = kHead;
1758 free_[kHead].next_size_ = kEnd;
1759}
1760
1761
1762void OldSpaceFreeList::RebuildSizeList() {
1763 ASSERT(needs_rebuild_);
1764 int cur = kHead;
1765 for (int i = cur + 1; i < kFreeListsLength; i++) {
1766 if (free_[i].head_node_ != NULL) {
1767 free_[cur].next_size_ = i;
1768 cur = i;
1769 }
1770 }
1771 free_[cur].next_size_ = kEnd;
1772 needs_rebuild_ = false;
1773}
1774
1775
1776int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1777#ifdef DEBUG
Leon Clarke4515c472010-02-03 11:58:03 +00001778 MemoryAllocator::ZapBlock(start, size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001779#endif
1780 FreeListNode* node = FreeListNode::FromAddress(start);
1781 node->set_size(size_in_bytes);
1782
1783 // We don't use the freelists in compacting mode. This makes it more like a
1784 // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1785 // collector.
1786 if (FLAG_always_compact) {
1787 return size_in_bytes;
1788 }
1789
1790 // Early return to drop too-small blocks on the floor (one or two word
1791 // blocks cannot hold a map pointer, a size field, and a pointer to the
1792 // next block in the free list).
1793 if (size_in_bytes < kMinBlockSize) {
1794 return size_in_bytes;
1795 }
1796
1797 // Insert other blocks at the head of an exact free list.
1798 int index = size_in_bytes >> kPointerSizeLog2;
1799 node->set_next(free_[index].head_node_);
1800 free_[index].head_node_ = node->address();
1801 available_ += size_in_bytes;
1802 needs_rebuild_ = true;
1803 return 0;
1804}
1805
1806
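// Allocation strategy sketch: free blocks are segregated by exact size, with
// free_[size_in_bytes >> kPointerSizeLog2] holding blocks of exactly that
// size (e.g. with 4-byte pointers a 40-byte block lives in free_[10]).
// Allocate() first tries an exact fit; otherwise it walks the size list from
// the finger to the smallest larger block, returns that block, and splits off
// its tail as a remainder that is either re-added to the free lists or, if
// smaller than kMinBlockSize, counted as waste.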
John Reck59135872010-11-02 12:39:01 -07001807MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001808 ASSERT(0 < size_in_bytes);
1809 ASSERT(size_in_bytes <= kMaxBlockSize);
1810 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1811
1812 if (needs_rebuild_) RebuildSizeList();
1813 int index = size_in_bytes >> kPointerSizeLog2;
1814 // Check for a perfect fit.
1815 if (free_[index].head_node_ != NULL) {
1816 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1817 // If this was the last block of its size, remove the size.
1818 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
1819 available_ -= size_in_bytes;
1820 *wasted_bytes = 0;
1821 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1822 return node;
1823 }
1824 // Search the size list for the best fit.
1825 int prev = finger_ < index ? finger_ : kHead;
1826 int cur = FindSize(index, &prev);
1827 ASSERT(index < cur);
1828 if (cur == kEnd) {
1829 // No large enough size in list.
1830 *wasted_bytes = 0;
Ben Murdochf87a2032010-10-22 12:50:53 +01001831 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001832 }
1833 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1834 int rem = cur - index;
1835 int rem_bytes = rem << kPointerSizeLog2;
1836 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
1837 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
1838 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1839 size_in_bytes);
1840 // Distinguish the cases prev < rem < cur and rem <= prev < cur
1841 // to avoid many redundant tests and calls to Insert/RemoveSize.
1842 if (prev < rem) {
1843 // Simple case: insert rem between prev and cur.
1844 finger_ = prev;
1845 free_[prev].next_size_ = rem;
1846 // If this was the last block of size cur, remove the size.
1847 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1848 free_[rem].next_size_ = free_[cur].next_size_;
1849 } else {
1850 free_[rem].next_size_ = cur;
1851 }
1852 // Add the remainder block.
1853 rem_node->set_size(rem_bytes);
1854 rem_node->set_next(free_[rem].head_node_);
1855 free_[rem].head_node_ = rem_node->address();
1856 } else {
1857 // If this was the last block of size cur, remove the size.
1858 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1859 finger_ = prev;
1860 free_[prev].next_size_ = free_[cur].next_size_;
1861 }
1862 if (rem_bytes < kMinBlockSize) {
1863 // Too-small remainder is wasted.
1864 rem_node->set_size(rem_bytes);
1865 available_ -= size_in_bytes + rem_bytes;
1866 *wasted_bytes = rem_bytes;
1867 return cur_node;
1868 }
1869 // Add the remainder block and, if needed, insert its size.
1870 rem_node->set_size(rem_bytes);
1871 rem_node->set_next(free_[rem].head_node_);
1872 free_[rem].head_node_ = rem_node->address();
1873 if (rem_node->next() == NULL) InsertSize(rem);
1874 }
1875 available_ -= size_in_bytes;
1876 *wasted_bytes = 0;
1877 return cur_node;
1878}
1879
1880
1881#ifdef DEBUG
1882bool OldSpaceFreeList::Contains(FreeListNode* node) {
1883 for (int i = 0; i < kFreeListsLength; i++) {
1884 Address cur_addr = free_[i].head_node_;
1885 while (cur_addr != NULL) {
1886 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1887 if (cur_node == node) return true;
1888 cur_addr = cur_node->next();
1889 }
1890 }
1891 return false;
1892}
1893#endif
1894
1895
1896FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
1897 : owner_(owner), object_size_(object_size) {
1898 Reset();
1899}
1900
1901
1902void FixedSizeFreeList::Reset() {
1903 available_ = 0;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001904 head_ = tail_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001905}
1906
1907
1908void FixedSizeFreeList::Free(Address start) {
1909#ifdef DEBUG
Leon Clarke4515c472010-02-03 11:58:03 +00001910 MemoryAllocator::ZapBlock(start, object_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001911#endif
Leon Clarkee46be812010-01-19 14:06:41 +00001912 // We only use the freelists with mark-sweep.
1913 ASSERT(!MarkCompactCollector::IsCompacting());
Steve Blocka7e24c12009-10-30 11:49:00 +00001914 FreeListNode* node = FreeListNode::FromAddress(start);
1915 node->set_size(object_size_);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001916 node->set_next(NULL);
1917 if (head_ == NULL) {
1918 tail_ = head_ = node->address();
1919 } else {
1920 FreeListNode::FromAddress(tail_)->set_next(node->address());
1921 tail_ = node->address();
1922 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001923 available_ += object_size_;
1924}
1925
1926
John Reck59135872010-11-02 12:39:01 -07001927MaybeObject* FixedSizeFreeList::Allocate() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001928 if (head_ == NULL) {
Ben Murdochf87a2032010-10-22 12:50:53 +01001929 return Failure::RetryAfterGC(owner_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001930 }
1931
1932 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1933 FreeListNode* node = FreeListNode::FromAddress(head_);
1934 head_ = node->next();
1935 available_ -= object_size_;
1936 return node;
1937}
1938
1939
1940// -----------------------------------------------------------------------------
1941// OldSpace implementation
1942
1943void OldSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01001944 // Call prepare of the super class.
1945 PagedSpace::PrepareForMarkCompact(will_compact);
1946
Steve Blocka7e24c12009-10-30 11:49:00 +00001947 if (will_compact) {
1948 // Reset relocation info. During a compacting collection, everything in
1949 // the space is considered 'available' and we will rediscover live data
1950 // and waste during the collection.
1951 MCResetRelocationInfo();
1952 ASSERT(Available() == Capacity());
1953 } else {
1954 // During a non-compacting collection, everything below the linear
1955 // allocation pointer is considered allocated (everything above is
1956 // available) and we will rediscover available and wasted bytes during
1957 // the collection.
1958 accounting_stats_.AllocateBytes(free_list_.available());
1959 accounting_stats_.FillWastedBytes(Waste());
1960 }
1961
1962 // Clear the free list before a full GC---it will be rebuilt afterward.
1963 free_list_.Reset();
1964}
1965
1966
1967void OldSpace::MCCommitRelocationInfo() {
1968 // Update fast allocation info.
1969 allocation_info_.top = mc_forwarding_info_.top;
1970 allocation_info_.limit = mc_forwarding_info_.limit;
1971 ASSERT(allocation_info_.VerifyPagedAllocation());
1972
1973 // The space is compacted and we haven't yet built free lists or
1974 // wasted any space.
1975 ASSERT(Waste() == 0);
1976 ASSERT(AvailableFree() == 0);
1977
1978 // Build the free list for the space.
1979 int computed_size = 0;
1980 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
1981 while (it.has_next()) {
1982 Page* p = it.next();
1983 // Space below the relocation pointer is allocated.
Steve Blockd0582a62009-12-15 09:54:21 +00001984 computed_size +=
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001985 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00001986 if (it.has_next()) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001987 // Free the space at the top of the page.
Steve Blockd0582a62009-12-15 09:54:21 +00001988 int extra_size =
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001989 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
Steve Blocka7e24c12009-10-30 11:49:00 +00001990 if (extra_size > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001991 int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
1992 extra_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001993 // The bytes we have just "freed" to add to the free list were
1994 // already accounted as available.
1995 accounting_stats_.WasteBytes(wasted_bytes);
1996 }
1997 }
1998 }
1999
2000 // Make sure the computed size - based on the used portion of the pages in
2001 // use - matches the size obtained while computing forwarding addresses.
2002 ASSERT(computed_size == Size());
2003}
2004
2005
Leon Clarkee46be812010-01-19 14:06:41 +00002006bool NewSpace::ReserveSpace(int bytes) {
2007 // We can't reliably unpack a partial snapshot that needs more new space
2008  // than the minimum NewSpace size.
2009 ASSERT(bytes <= InitialCapacity());
2010 Address limit = allocation_info_.limit;
2011 Address top = allocation_info_.top;
2012 return limit - top >= bytes;
2013}
2014
2015
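// Unlinks the pages following 'prev' up to and including 'last' from their
// current position and re-attaches them at the end of the space's page list,
// resetting their allocation watermarks and region marks. No memory is
// released here; the pages simply become unused tail pages.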
Steve Block6ded16b2010-05-10 14:33:55 +01002016void PagedSpace::FreePages(Page* prev, Page* last) {
2017 if (last == AllocationTopPage()) {
2018 // Pages are already at the end of used pages.
2019 return;
2020 }
2021
2022 Page* first = NULL;
2023
2024 // Remove pages from the list.
2025 if (prev == NULL) {
2026 first = first_page_;
2027 first_page_ = last->next_page();
2028 } else {
2029 first = prev->next_page();
2030 MemoryAllocator::SetNextPage(prev, last->next_page());
2031 }
2032
2033 // Attach it after the last page.
2034 MemoryAllocator::SetNextPage(last_page_, first);
2035 last_page_ = last;
2036 MemoryAllocator::SetNextPage(last, NULL);
2037
2038 // Clean them up.
2039 do {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002040 first->InvalidateWatermark(true);
2041 first->SetAllocationWatermark(first->ObjectAreaStart());
2042 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
2043 first->SetRegionMarks(Page::kAllRegionsCleanMarks);
Steve Block6ded16b2010-05-10 14:33:55 +01002044 first = first->next_page();
2045 } while (first != NULL);
2046
2047 // Order of pages in this space might no longer be consistent with
2048 // order of pages in chunks.
2049 page_list_is_chunk_ordered_ = false;
2050}
2051
2052
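// Restores the page list to chunk order (the order in which pages were
// originally allocated). Unused pages that end up between used pages are
// either handed back to the free list or overwritten with filler objects,
// depending on 'deallocate_blocks', so object iteration over the space
// remains valid.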
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002053void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
2054 const bool add_to_freelist = true;
2055
2056 // Mark used and unused pages to properly fill unused pages
2057 // after reordering.
2058 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
2059 Page* last_in_use = AllocationTopPage();
2060 bool in_use = true;
2061
2062 while (all_pages_iterator.has_next()) {
2063 Page* p = all_pages_iterator.next();
2064 p->SetWasInUseBeforeMC(in_use);
2065 if (p == last_in_use) {
2066      // We passed the page containing the allocation top. All subsequent
2067      // pages are unused.
2068 in_use = false;
2069 }
2070 }
2071
2072 if (page_list_is_chunk_ordered_) return;
2073
2074 Page* new_last_in_use = Page::FromAddress(NULL);
2075 MemoryAllocator::RelinkPageListInChunkOrder(this,
2076 &first_page_,
2077 &last_page_,
2078 &new_last_in_use);
2079 ASSERT(new_last_in_use->is_valid());
2080
2081 if (new_last_in_use != last_in_use) {
2082 // Current allocation top points to a page which is now in the middle
2083    // of the page list. We should move allocation top forward to the new last
2084 // used page so various object iterators will continue to work properly.
2085 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2086 last_in_use->AllocationTop());
2087
2088 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2089 if (size_in_bytes > 0) {
2090 Address start = last_in_use->AllocationTop();
2091 if (deallocate_blocks) {
2092 accounting_stats_.AllocateBytes(size_in_bytes);
2093 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2094 } else {
2095 Heap::CreateFillerObjectAt(start, size_in_bytes);
2096 }
2097 }
2098
2099    // The new last-in-use page was in the middle of the list before
2100    // sorting, so it is full.
2101 SetTop(new_last_in_use->AllocationTop());
2102
2103 ASSERT(AllocationTopPage() == new_last_in_use);
2104 ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2105 }
2106
2107 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2108 while (pages_in_use_iterator.has_next()) {
2109 Page* p = pages_in_use_iterator.next();
2110 if (!p->WasInUseBeforeMC()) {
2111 // Empty page is in the middle of a sequence of used pages.
2112 // Allocate it as a whole and deallocate immediately.
2113 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2114 p->ObjectAreaStart());
2115
2116 p->SetAllocationWatermark(p->ObjectAreaStart());
2117 Address start = p->ObjectAreaStart();
2118 if (deallocate_blocks) {
2119 accounting_stats_.AllocateBytes(size_in_bytes);
2120 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2121 } else {
2122 Heap::CreateFillerObjectAt(start, size_in_bytes);
2123 }
2124 }
2125 }
2126
2127 page_list_is_chunk_ordered_ = true;
2128}
2129
2130
Steve Block6ded16b2010-05-10 14:33:55 +01002131void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2132 if (will_compact) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002133 RelinkPageListInChunkOrder(false);
Steve Block6ded16b2010-05-10 14:33:55 +01002134 }
2135}
2136
2137
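// Ensures that 'bytes' of space can be allocated without a GC: if the current
// page cannot hold them, the rest of the page is put on the free list, fresh
// pages are expanded as needed (unless the old generation limit has been
// reached), and linear allocation moves to the next page.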
Leon Clarkee46be812010-01-19 14:06:41 +00002138bool PagedSpace::ReserveSpace(int bytes) {
2139 Address limit = allocation_info_.limit;
2140 Address top = allocation_info_.top;
2141 if (limit - top >= bytes) return true;
2142
2143  // There wasn't enough space in the current page. Let's put the rest
2144 // of the page on the free list and start a fresh page.
2145 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2146
2147 Page* reserved_page = TopPageOf(allocation_info_);
2148 int bytes_left_to_reserve = bytes;
2149 while (bytes_left_to_reserve > 0) {
2150 if (!reserved_page->next_page()->is_valid()) {
2151 if (Heap::OldGenerationAllocationLimitReached()) return false;
2152 Expand(reserved_page);
2153 }
2154 bytes_left_to_reserve -= Page::kPageSize;
2155 reserved_page = reserved_page->next_page();
2156 if (!reserved_page->is_valid()) return false;
2157 }
2158 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002159 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
Leon Clarkee46be812010-01-19 14:06:41 +00002160 SetAllocationInfo(&allocation_info_,
2161 TopPageOf(allocation_info_)->next_page());
2162 return true;
2163}
2164
2165
2166// You have to call this last, since the implementation from PagedSpace
2167// doesn't know that memory was 'promised' to large object space.
2168bool LargeObjectSpace::ReserveSpace(int bytes) {
2169 return Heap::OldGenerationSpaceAvailable() >= bytes;
2170}
2171
2172
Steve Blocka7e24c12009-10-30 11:49:00 +00002173// Slow case for normal allocation. Try in order: (1) allocate in the next
2174// page in the space, (2) allocate off the space's free list, (3) expand the
2175// space, (4) fail.
2176HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2177 // Linear allocation in this space has failed. If there is another page
2178 // in the space, move to that page and allocate there. This allocation
2179 // should succeed (size_in_bytes should not be greater than a page's
2180 // object area size).
2181 Page* current_page = TopPageOf(allocation_info_);
2182 if (current_page->next_page()->is_valid()) {
2183 return AllocateInNextPage(current_page, size_in_bytes);
2184 }
2185
Steve Blockd0582a62009-12-15 09:54:21 +00002186 // There is no next page in this space. Try free list allocation unless that
2187 // is currently forbidden.
2188 if (!Heap::linear_allocation()) {
2189 int wasted_bytes;
John Reck59135872010-11-02 12:39:01 -07002190 Object* result;
2191 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
Steve Blockd0582a62009-12-15 09:54:21 +00002192 accounting_stats_.WasteBytes(wasted_bytes);
John Reck59135872010-11-02 12:39:01 -07002193 if (maybe->ToObject(&result)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002194 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002195
2196 HeapObject* obj = HeapObject::cast(result);
2197 Page* p = Page::FromAddress(obj->address());
2198
2199 if (obj->address() >= p->AllocationWatermark()) {
2200 // There should be no hole between the allocation watermark
2201 // and allocated object address.
2202 // Memory above the allocation watermark was not swept and
2203 // might contain garbage pointers to new space.
2204 ASSERT(obj->address() == p->AllocationWatermark());
2205 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2206 }
2207
2208 return obj;
Steve Blockd0582a62009-12-15 09:54:21 +00002209 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002210 }
2211
2212 // Free list allocation failed and there is no next page. Fail if we have
2213 // hit the old generation size limit that should cause a garbage
2214 // collection.
2215 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2216 return NULL;
2217 }
2218
2219 // Try to expand the space and allocate in the new next page.
2220 ASSERT(!current_page->next_page()->is_valid());
2221 if (Expand(current_page)) {
2222 return AllocateInNextPage(current_page, size_in_bytes);
2223 }
2224
2225 // Finally, fail.
2226 return NULL;
2227}
2228
2229
Leon Clarkee46be812010-01-19 14:06:41 +00002230void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002231 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blockd0582a62009-12-15 09:54:21 +00002232 int free_size =
2233 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002234 if (free_size > 0) {
2235 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2236 accounting_stats_.WasteBytes(wasted_bytes);
2237 }
Leon Clarkee46be812010-01-19 14:06:41 +00002238}
2239
2240
2241void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002242 current_page->SetAllocationWatermark(allocation_info_.top);
Leon Clarkee46be812010-01-19 14:06:41 +00002243 int free_size =
2244 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2245 // In the fixed space free list all the free list items have the right size.
2246 // We use up the rest of the page while preserving this invariant.
2247 while (free_size >= object_size_in_bytes_) {
2248 free_list_.Free(allocation_info_.top);
2249 allocation_info_.top += object_size_in_bytes_;
2250 free_size -= object_size_in_bytes_;
2251 accounting_stats_.WasteBytes(object_size_in_bytes_);
2252 }
2253}
2254
2255
2256// Add the block at the top of the page to the space's free list, set the
2257// allocation info to the next page (which is assumed to exist), and allocate
2258// linearly there.
2259HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2260 int size_in_bytes) {
2261 ASSERT(current_page->next_page()->is_valid());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002262 Page* next_page = current_page->next_page();
2263 next_page->ClearGCFields();
Leon Clarkee46be812010-01-19 14:06:41 +00002264 PutRestOfCurrentPageOnFreeList(current_page);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002265 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002266 return AllocateLinearly(&allocation_info_, size_in_bytes);
2267}
2268
2269
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002270void OldSpace::DeallocateBlock(Address start,
2271 int size_in_bytes,
2272 bool add_to_freelist) {
2273 Free(start, size_in_bytes, add_to_freelist);
2274}
2275
2276
Steve Blocka7e24c12009-10-30 11:49:00 +00002277#ifdef DEBUG
2278struct CommentStatistic {
2279 const char* comment;
2280 int size;
2281 int count;
2282 void Clear() {
2283 comment = NULL;
2284 size = 0;
2285 count = 0;
2286 }
2287};
2288
2289
2290// Must be small, since an iteration is used for lookup.
2291const int kMaxComments = 64;
2292static CommentStatistic comments_statistics[kMaxComments+1];
2293
2294
2295void PagedSpace::ReportCodeStatistics() {
2296 ReportCodeKindStatistics();
2297 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2298 "count (average)\"):\n");
2299 for (int i = 0; i <= kMaxComments; i++) {
2300 const CommentStatistic& cs = comments_statistics[i];
2301 if (cs.size > 0) {
2302 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2303 cs.size/cs.count);
2304 }
2305 }
2306 PrintF("\n");
2307}
2308
2309
2310void PagedSpace::ResetCodeStatistics() {
2311 ClearCodeKindStatistics();
2312 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
2313 comments_statistics[kMaxComments].comment = "Unknown";
2314 comments_statistics[kMaxComments].size = 0;
2315 comments_statistics[kMaxComments].count = 0;
2316}
2317
2318
2319// Adds comment to 'comments_statistics' table. Performance OK as long as
2320// 'kMaxComments' is small.
2321static void EnterComment(const char* comment, int delta) {
2322 // Do not count empty comments
2323 if (delta <= 0) return;
2324 CommentStatistic* cs = &comments_statistics[kMaxComments];
2325 // Search for a free or matching entry in 'comments_statistics': 'cs'
2326 // points to result.
2327 for (int i = 0; i < kMaxComments; i++) {
2328 if (comments_statistics[i].comment == NULL) {
2329 cs = &comments_statistics[i];
2330 cs->comment = comment;
2331 break;
2332 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2333 cs = &comments_statistics[i];
2334 break;
2335 }
2336 }
2337 // Update entry for 'comment'
2338 cs->size += delta;
2339 cs->count += 1;
2340}
2341
2342
2343// Call for each nested comment start (start marked with '[ xxx', end marked
2344// with ']'). RelocIterator 'it' must point to a comment reloc info.
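// Illustrative comment stream (not literal data):
//   "[ outer"  "[ inner"  "]"  "]"
// The flat (non-nested) code size between a '[ ...' comment and its matching
// ']' is attributed to that comment via EnterComment().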
2345static void CollectCommentStatistics(RelocIterator* it) {
2346 ASSERT(!it->done());
2347 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2348 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2349 if (tmp[0] != '[') {
2350 // Not a nested comment; skip
2351 return;
2352 }
2353
2354 // Search for end of nested comment or a new nested comment
2355 const char* const comment_txt =
2356 reinterpret_cast<const char*>(it->rinfo()->data());
2357 const byte* prev_pc = it->rinfo()->pc();
2358 int flat_delta = 0;
2359 it->next();
2360 while (true) {
2361 // All nested comments must be terminated properly, and therefore exit
2362 // from loop.
2363 ASSERT(!it->done());
2364 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2365 const char* const txt =
2366 reinterpret_cast<const char*>(it->rinfo()->data());
Steve Blockd0582a62009-12-15 09:54:21 +00002367 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002368 if (txt[0] == ']') break; // End of nested comment
2369 // A new comment
2370 CollectCommentStatistics(it);
2371 // Skip code that was covered with previous comment
2372 prev_pc = it->rinfo()->pc();
2373 }
2374 it->next();
2375 }
2376 EnterComment(comment_txt, flat_delta);
2377}
2378
2379
2380// Collects code size statistics:
2381// - by code kind
2382// - by code comment
2383void PagedSpace::CollectCodeStatistics() {
2384 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002385 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002386 if (obj->IsCode()) {
2387 Code* code = Code::cast(obj);
2388 code_kind_statistics[code->kind()] += code->Size();
2389 RelocIterator it(code);
2390 int delta = 0;
2391 const byte* prev_pc = code->instruction_start();
2392 while (!it.done()) {
2393 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
Steve Blockd0582a62009-12-15 09:54:21 +00002394 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002395 CollectCommentStatistics(&it);
2396 prev_pc = it.rinfo()->pc();
2397 }
2398 it.next();
2399 }
2400
2401 ASSERT(code->instruction_start() <= prev_pc &&
Leon Clarkeac952652010-07-15 11:15:24 +01002402 prev_pc <= code->instruction_end());
2403 delta += static_cast<int>(code->instruction_end() - prev_pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002404 EnterComment("NoComment", delta);
2405 }
2406 }
2407}
2408
2409
2410void OldSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002411 int pct = static_cast<int>(Available() * 100 / Capacity());
2412 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2413 ", waste: %" V8_PTR_PREFIX "d"
2414 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002415 Capacity(), Waste(), Available(), pct);
2416
Steve Blocka7e24c12009-10-30 11:49:00 +00002417 ClearHistograms();
2418 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002419 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2420 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002421 ReportHistogram(true);
2422}
Steve Blocka7e24c12009-10-30 11:49:00 +00002423#endif
2424
2425// -----------------------------------------------------------------------------
2426// FixedSpace implementation
2427
2428void FixedSpace::PrepareForMarkCompact(bool will_compact) {
Steve Block6ded16b2010-05-10 14:33:55 +01002429 // Call prepare of the super class.
2430 PagedSpace::PrepareForMarkCompact(will_compact);
2431
Steve Blocka7e24c12009-10-30 11:49:00 +00002432 if (will_compact) {
2433 // Reset relocation info.
2434 MCResetRelocationInfo();
2435
2436 // During a compacting collection, everything in the space is considered
2437 // 'available' (set by the call to MCResetRelocationInfo) and we will
2438 // rediscover live and wasted bytes during the collection.
2439 ASSERT(Available() == Capacity());
2440 } else {
2441 // During a non-compacting collection, everything below the linear
2442 // allocation pointer except wasted top-of-page blocks is considered
2443 // allocated and we will rediscover available bytes during the
2444 // collection.
2445 accounting_stats_.AllocateBytes(free_list_.available());
2446 }
2447
2448 // Clear the free list before a full GC---it will be rebuilt afterward.
2449 free_list_.Reset();
2450}
2451
2452
2453void FixedSpace::MCCommitRelocationInfo() {
2454 // Update fast allocation info.
2455 allocation_info_.top = mc_forwarding_info_.top;
2456 allocation_info_.limit = mc_forwarding_info_.limit;
2457 ASSERT(allocation_info_.VerifyPagedAllocation());
2458
2459 // The space is compacted and we haven't yet wasted any space.
2460 ASSERT(Waste() == 0);
2461
2462 // Update allocation_top of each page in use and compute waste.
2463 int computed_size = 0;
2464 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2465 while (it.has_next()) {
2466 Page* page = it.next();
2467 Address page_top = page->AllocationTop();
Steve Blockd0582a62009-12-15 09:54:21 +00002468 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
Steve Blocka7e24c12009-10-30 11:49:00 +00002469 if (it.has_next()) {
Steve Blockd0582a62009-12-15 09:54:21 +00002470 accounting_stats_.WasteBytes(
2471 static_cast<int>(page->ObjectAreaEnd() - page_top));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002472 page->SetAllocationWatermark(page_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002473 }
2474 }
2475
2476 // Make sure the computed size - based on the used portion of the
2477 // pages in use - matches the size we adjust during allocation.
2478 ASSERT(computed_size == Size());
2479}
2480
2481
2482// Slow case for normal allocation. Try in order: (1) allocate in the next
2483// page in the space, (2) allocate off the space's free list, (3) expand the
2484// space, (4) fail.
2485HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2486 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2487 // Linear allocation in this space has failed. If there is another page
2488 // in the space, move to that page and allocate there. This allocation
2489 // should succeed.
2490 Page* current_page = TopPageOf(allocation_info_);
2491 if (current_page->next_page()->is_valid()) {
2492 return AllocateInNextPage(current_page, size_in_bytes);
2493 }
2494
Steve Blockd0582a62009-12-15 09:54:21 +00002495 // There is no next page in this space. Try free list allocation unless
2496 // that is currently forbidden. The fixed space free list implicitly assumes
2497 // that all free blocks are of the fixed size.
2498 if (!Heap::linear_allocation()) {
John Reck59135872010-11-02 12:39:01 -07002499 Object* result;
2500 MaybeObject* maybe = free_list_.Allocate();
2501 if (maybe->ToObject(&result)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002502 accounting_stats_.AllocateBytes(size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002503 HeapObject* obj = HeapObject::cast(result);
2504 Page* p = Page::FromAddress(obj->address());
2505
2506 if (obj->address() >= p->AllocationWatermark()) {
2507 // There should be no hole between the allocation watermark
2508 // and allocated object address.
2509 // Memory above the allocation watermark was not swept and
2510 // might contain garbage pointers to new space.
2511 ASSERT(obj->address() == p->AllocationWatermark());
2512 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2513 }
2514
2515 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00002516 }
2517 }
2518
2519 // Free list allocation failed and there is no next page. Fail if we have
2520 // hit the old generation size limit that should cause a garbage
2521 // collection.
2522 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2523 return NULL;
2524 }
2525
2526 // Try to expand the space and allocate in the new next page.
2527 ASSERT(!current_page->next_page()->is_valid());
2528 if (Expand(current_page)) {
2529 return AllocateInNextPage(current_page, size_in_bytes);
2530 }
2531
2532 // Finally, fail.
2533 return NULL;
2534}
2535
2536
2537// Move to the next page (there is assumed to be one) and allocate there.
2538// The top-of-page block is always wasted, because it is too small to hold a
2539// map.
2540HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2541 int size_in_bytes) {
2542 ASSERT(current_page->next_page()->is_valid());
Steve Block6ded16b2010-05-10 14:33:55 +01002543 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
Steve Blocka7e24c12009-10-30 11:49:00 +00002544 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002545 Page* next_page = current_page->next_page();
2546 next_page->ClearGCFields();
2547 current_page->SetAllocationWatermark(allocation_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002548 accounting_stats_.WasteBytes(page_extra_);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002549 SetAllocationInfo(&allocation_info_, next_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002550 return AllocateLinearly(&allocation_info_, size_in_bytes);
2551}
2552
2553
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002554void FixedSpace::DeallocateBlock(Address start,
2555 int size_in_bytes,
2556 bool add_to_freelist) {
2557 // Free-list elements in fixed space are assumed to have a fixed size.
2558 // We break the free block into chunks and add them to the free list
2559 // individually.
2560 int size = object_size_in_bytes();
2561 ASSERT(size_in_bytes % size == 0);
2562 Address end = start + size_in_bytes;
2563 for (Address a = start; a < end; a += size) {
2564 Free(a, add_to_freelist);
2565 }
2566}
2567
2568
Steve Blocka7e24c12009-10-30 11:49:00 +00002569#ifdef DEBUG
2570void FixedSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01002571 int pct = static_cast<int>(Available() * 100 / Capacity());
2572 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2573 ", waste: %" V8_PTR_PREFIX "d"
2574 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Steve Blocka7e24c12009-10-30 11:49:00 +00002575 Capacity(), Waste(), Available(), pct);
2576
Steve Blocka7e24c12009-10-30 11:49:00 +00002577 ClearHistograms();
2578 HeapObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002579 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2580 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002581 ReportHistogram(false);
2582}
Steve Blocka7e24c12009-10-30 11:49:00 +00002583#endif
2584
2585
2586// -----------------------------------------------------------------------------
2587// MapSpace implementation
2588
2589void MapSpace::PrepareForMarkCompact(bool will_compact) {
2590 // Call prepare of the super class.
2591 FixedSpace::PrepareForMarkCompact(will_compact);
2592
2593 if (will_compact) {
2594 // Initialize map index entry.
2595 int page_count = 0;
2596 PageIterator it(this, PageIterator::ALL_PAGES);
2597 while (it.has_next()) {
2598 ASSERT_MAP_PAGE_INDEX(page_count);
2599
2600 Page* p = it.next();
2601 ASSERT(p->mc_page_index == page_count);
2602
2603 page_addresses_[page_count++] = p->address();
2604 }
2605 }
2606}
2607
2608
2609#ifdef DEBUG
2610void MapSpace::VerifyObject(HeapObject* object) {
2611 // The object should be a map or a free-list node.
2612 ASSERT(object->IsMap() || object->IsByteArray());
2613}
2614#endif
2615
2616
2617// -----------------------------------------------------------------------------
2618// GlobalPropertyCellSpace implementation
2619
2620#ifdef DEBUG
2621void CellSpace::VerifyObject(HeapObject* object) {
2622 // The object should be a global object property cell or a free-list node.
2623 ASSERT(object->IsJSGlobalPropertyCell() ||
2624 object->map() == Heap::two_pointer_filler_map());
2625}
2626#endif
2627
2628
2629// -----------------------------------------------------------------------------
2630// LargeObjectIterator
2631
2632LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2633 current_ = space->first_chunk_;
2634 size_func_ = NULL;
2635}
2636
2637
2638LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2639 HeapObjectCallback size_func) {
2640 current_ = space->first_chunk_;
2641 size_func_ = size_func;
2642}
2643
2644
2645HeapObject* LargeObjectIterator::next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002646 if (current_ == NULL) return NULL;
2647
Steve Blocka7e24c12009-10-30 11:49:00 +00002648 HeapObject* object = current_->GetObject();
2649 current_ = current_->next();
2650 return object;
2651}
2652
2653
2654// -----------------------------------------------------------------------------
2655// LargeObjectChunk
2656
2657LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2658 size_t* chunk_size,
2659 Executability executable) {
2660 size_t requested = ChunkSizeFor(size_in_bytes);
2661 void* mem = MemoryAllocator::AllocateRawMemory(requested,
2662 chunk_size,
2663 executable);
2664 if (mem == NULL) return NULL;
2665 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
2666 if (*chunk_size < requested) {
Steve Block791712a2010-08-27 10:21:07 +01002667 MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002668 LOG(DeleteEvent("LargeObjectChunk", mem));
2669 return NULL;
2670 }
Iain Merrick9ac36c92010-09-13 15:29:50 +01002671 ObjectSpace space =
2672 (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
2673 MemoryAllocator::PerformAllocationCallback(space,
2674 kAllocationActionAllocate,
2675 *chunk_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002676 return reinterpret_cast<LargeObjectChunk*>(mem);
2677}
2678
2679
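// Computes the amount of raw memory to request for a large object chunk.
// When the OS allocation alignment is smaller than a page, the added slack of
// (Page::kPageSize - os_alignment) plus Page::kObjectStartOffset guarantees
// that, after the chunk start is rounded up to a page boundary, there is
// still room for the page header and the requested object.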
2680int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
Steve Blockd0582a62009-12-15 09:54:21 +00002681 int os_alignment = static_cast<int>(OS::AllocateAlignment());
Steve Blocka7e24c12009-10-30 11:49:00 +00002682 if (os_alignment < Page::kPageSize)
2683 size_in_bytes += (Page::kPageSize - os_alignment);
2684 return size_in_bytes + Page::kObjectStartOffset;
2685}
2686
2687// -----------------------------------------------------------------------------
2688// LargeObjectSpace
2689
2690LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
2691 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2692 first_chunk_(NULL),
2693 size_(0),
2694 page_count_(0) {}
2695
2696
2697bool LargeObjectSpace::Setup() {
2698 first_chunk_ = NULL;
2699 size_ = 0;
2700 page_count_ = 0;
2701 return true;
2702}
2703
2704
2705void LargeObjectSpace::TearDown() {
2706 while (first_chunk_ != NULL) {
2707 LargeObjectChunk* chunk = first_chunk_;
2708 first_chunk_ = first_chunk_->next();
2709 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
Steve Block791712a2010-08-27 10:21:07 +01002710 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2711 Executability executable =
2712 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Iain Merrick9ac36c92010-09-13 15:29:50 +01002713 ObjectSpace space = kObjectSpaceLoSpace;
2714 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2715 size_t size = chunk->size();
2716 MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
2717 MemoryAllocator::PerformAllocationCallback(
2718 space, kAllocationActionFree, size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002719 }
2720
2721 size_ = 0;
2722 page_count_ = 0;
2723}
2724
2725
2726#ifdef ENABLE_HEAP_PROTECTION
2727
2728void LargeObjectSpace::Protect() {
2729 LargeObjectChunk* chunk = first_chunk_;
2730 while (chunk != NULL) {
2731 MemoryAllocator::Protect(chunk->address(), chunk->size());
2732 chunk = chunk->next();
2733 }
2734}
2735
2736
2737void LargeObjectSpace::Unprotect() {
2738 LargeObjectChunk* chunk = first_chunk_;
2739 while (chunk != NULL) {
2740 bool is_code = chunk->GetObject()->IsCode();
2741 MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
2742 is_code ? EXECUTABLE : NOT_EXECUTABLE);
2743 chunk = chunk->next();
2744 }
2745}
2746
2747#endif
2748
2749
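// Common allocation path for large objects. Every object gets a chunk of its
// own, which is prepended to the chunk list; the page header inside the chunk
// is initialized so that the object body starts at ObjectAreaStart().
// requested_size is the amount of raw memory asked for and must be at least
// object_size (see the ASSERT below).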
John Reck59135872010-11-02 12:39:01 -07002750MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
2751 int object_size,
2752 Executability executable) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002753 ASSERT(0 < object_size && object_size <= requested_size);
2754
2755 // Check if we want to force a GC before growing the old space further.
2756 // If so, fail the allocation.
2757 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002758 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002759 }
2760
2761 size_t chunk_size;
2762 LargeObjectChunk* chunk =
2763 LargeObjectChunk::New(requested_size, &chunk_size, executable);
2764 if (chunk == NULL) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002765 return Failure::RetryAfterGC(identity());
Steve Blocka7e24c12009-10-30 11:49:00 +00002766 }
2767
Steve Blockd0582a62009-12-15 09:54:21 +00002768 size_ += static_cast<int>(chunk_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002769 page_count_++;
2770 chunk->set_next(first_chunk_);
2771 chunk->set_size(chunk_size);
2772 first_chunk_ = chunk;
2773
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002774 // Initialize page header.
Steve Blocka7e24c12009-10-30 11:49:00 +00002775 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2776 Address object_address = page->ObjectAreaStart();
2777 // Clear the low order bit of the second word in the page to flag it as a
2778 // large object page. If the chunk_size happened to be written there, its
2779 // low order bit should already be clear.
2780 ASSERT((chunk_size & 0x1) == 0);
Steve Block6ded16b2010-05-10 14:33:55 +01002781 page->SetIsLargeObjectPage(true);
Steve Block791712a2010-08-27 10:21:07 +01002782 page->SetIsPageExecutable(executable);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002783 page->SetRegionMarks(Page::kAllRegionsCleanMarks);
Steve Blocka7e24c12009-10-30 11:49:00 +00002784 return HeapObject::FromAddress(object_address);
2785}
2786
2787
John Reck59135872010-11-02 12:39:01 -07002788MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002789 ASSERT(0 < size_in_bytes);
2790 return AllocateRawInternal(size_in_bytes,
2791 size_in_bytes,
2792 EXECUTABLE);
2793}
2794
2795
John Reck59135872010-11-02 12:39:01 -07002796MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002797 ASSERT(0 < size_in_bytes);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002798 return AllocateRawInternal(size_in_bytes,
Steve Blocka7e24c12009-10-30 11:49:00 +00002799 size_in_bytes,
2800 NOT_EXECUTABLE);
2801}
2802
2803
John Reck59135872010-11-02 12:39:01 -07002804MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002805 ASSERT(0 < size_in_bytes);
2806 return AllocateRawInternal(size_in_bytes,
2807 size_in_bytes,
2808 NOT_EXECUTABLE);
2809}
2810
2811
2812// GC support
John Reck59135872010-11-02 12:39:01 -07002813MaybeObject* LargeObjectSpace::FindObject(Address a) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002814 for (LargeObjectChunk* chunk = first_chunk_;
2815 chunk != NULL;
2816 chunk = chunk->next()) {
2817 Address chunk_address = chunk->address();
2818 if (chunk_address <= a && a < chunk_address + chunk->size()) {
2819 return chunk->GetObject();
2820 }
2821 }
2822 return Failure::Exception();
2823}
2824
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002825
2826LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
2827 // TODO(853): Change this implementation to only find executable
2828 // chunks and use some kind of hash-based approach to speed it up.
2829 for (LargeObjectChunk* chunk = first_chunk_;
2830 chunk != NULL;
2831 chunk = chunk->next()) {
2832 Address chunk_address = chunk->address();
2833 if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
2834 return chunk;
2835 }
2836 }
2837 return NULL;
2838}
2839
2840
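// Iterates pointers to new space stored in dirty regions of large object
// pages. Only fixed arrays can hold such pointers here. A large page carries
// a single 32-entry region bitmap, so a big fixed array is walked as a
// sequence of Page::kPageSize-sized slices that all reuse the same dirty
// marks.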
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002841void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002842 LargeObjectIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00002843 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002844 // We only have code, sequential strings, or fixed arrays in large
2845 // object space, and only fixed arrays can possibly contain pointers to
2846 // the young generation.
Steve Blocka7e24c12009-10-30 11:49:00 +00002847 if (object->IsFixedArray()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002848 Page* page = Page::FromAddress(object->address());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002849 uint32_t marks = page->GetRegionMarks();
2850 uint32_t newmarks = Page::kAllRegionsCleanMarks;
Steve Blocka7e24c12009-10-30 11:49:00 +00002851
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002852 if (marks != Page::kAllRegionsCleanMarks) {
2853 // For a large page a single dirty mark corresponds to several
2854 // regions (modulo 32). So we treat a large page as a sequence of
2855        // normal pages of size Page::kPageSize having the same dirty marks
2856 // and subsequently iterate dirty regions on each of these pages.
2857 Address start = object->address();
2858 Address end = page->ObjectAreaEnd();
2859 Address object_end = start + object->Size();
2860
2861 // Iterate regions of the first normal page covering object.
2862 uint32_t first_region_number = page->GetRegionNumberForAddress(start);
2863 newmarks |=
2864 Heap::IterateDirtyRegions(marks >> first_region_number,
2865 start,
2866 end,
2867 &Heap::IteratePointersInDirtyRegion,
2868 copy_object) << first_region_number;
2869
2870 start = end;
2871 end = start + Page::kPageSize;
2872 while (end <= object_end) {
2873 // Iterate next 32 regions.
2874 newmarks |=
2875 Heap::IterateDirtyRegions(marks,
2876 start,
2877 end,
2878 &Heap::IteratePointersInDirtyRegion,
2879 copy_object);
2880 start = end;
2881 end = start + Page::kPageSize;
2882 }
2883
2884 if (start != object_end) {
2885 // Iterate the last piece of an object which is less than
2886 // Page::kPageSize.
2887 newmarks |=
2888 Heap::IterateDirtyRegions(marks,
2889 start,
2890 object_end,
2891 &Heap::IteratePointersInDirtyRegion,
2892 copy_object);
2893 }
2894
2895 page->SetRegionMarks(newmarks);
Steve Blocka7e24c12009-10-30 11:49:00 +00002896 }
2897 }
2898 }
2899}
2900
2901
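// Sweeps the large object space: unmarked chunks are unlinked from the chunk
// list and their raw memory is returned to the MemoryAllocator; marked
// objects just have their mark bit cleared.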
2902void LargeObjectSpace::FreeUnmarkedObjects() {
2903 LargeObjectChunk* previous = NULL;
2904 LargeObjectChunk* current = first_chunk_;
2905 while (current != NULL) {
2906 HeapObject* object = current->GetObject();
2907 if (object->IsMarked()) {
2908 object->ClearMark();
2909 MarkCompactCollector::tracer()->decrement_marked_count();
2910 previous = current;
2911 current = current->next();
2912 } else {
Steve Block791712a2010-08-27 10:21:07 +01002913 Page* page = Page::FromAddress(RoundUp(current->address(),
2914 Page::kPageSize));
2915 Executability executable =
2916 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Steve Blocka7e24c12009-10-30 11:49:00 +00002917 Address chunk_address = current->address();
2918 size_t chunk_size = current->size();
2919
2920 // Cut the chunk out from the chunk list.
2921 current = current->next();
2922 if (previous == NULL) {
2923 first_chunk_ = current;
2924 } else {
2925 previous->set_next(current);
2926 }
2927
2928 // Free the chunk.
Leon Clarked91b9f72010-01-27 17:25:45 +00002929 MarkCompactCollector::ReportDeleteIfNeeded(object);
Steve Blockd0582a62009-12-15 09:54:21 +00002930 size_ -= static_cast<int>(chunk_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002931 page_count_--;
Iain Merrick9ac36c92010-09-13 15:29:50 +01002932 ObjectSpace space = kObjectSpaceLoSpace;
2933 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
Steve Block791712a2010-08-27 10:21:07 +01002934 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
Iain Merrick9ac36c92010-09-13 15:29:50 +01002935 MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
2936                                                 chunk_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002937 LOG(DeleteEvent("LargeObjectChunk", chunk_address));
2938 }
2939 }
2940}
2941
2942
2943bool LargeObjectSpace::Contains(HeapObject* object) {
2944 Address address = object->address();
Steve Block6ded16b2010-05-10 14:33:55 +01002945 if (Heap::new_space()->Contains(address)) {
2946 return false;
2947 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002948 Page* page = Page::FromAddress(address);
2949
2950 SLOW_ASSERT(!page->IsLargeObjectPage()
2951 || !FindObject(address)->IsFailure());
2952
2953 return page->IsLargeObjectPage();
2954}
2955
2956
2957#ifdef DEBUG
2958// We do not assume that the large object iterator works, because it depends
2959// on the invariants we are checking during verification.
2960void LargeObjectSpace::Verify() {
2961 for (LargeObjectChunk* chunk = first_chunk_;
2962 chunk != NULL;
2963 chunk = chunk->next()) {
2964 // Each chunk contains an object that starts at the large object page's
2965 // object area start.
2966 HeapObject* object = chunk->GetObject();
2967 Page* page = Page::FromAddress(object->address());
2968 ASSERT(object->address() == page->ObjectAreaStart());
2969
2970 // The first word should be a map, and we expect all map pointers to be
2971 // in map space.
2972 Map* map = object->map();
2973 ASSERT(map->IsMap());
2974 ASSERT(Heap::map_space()->Contains(map));
2975
2976 // We have only code, sequential strings, external strings
2977 // (sequential strings that have been morphed into external
2978 // strings), fixed arrays, and byte arrays in large object space.
2979 ASSERT(object->IsCode() || object->IsSeqString() ||
2980 object->IsExternalString() || object->IsFixedArray() ||
2981 object->IsByteArray());
2982
2983 // The object itself should look OK.
2984 object->Verify();
2985
2986 // Byte arrays and strings don't have interior pointers.
2987 if (object->IsCode()) {
2988 VerifyPointersVisitor code_visitor;
2989 object->IterateBody(map->instance_type(),
2990 object->Size(),
2991 &code_visitor);
2992 } else if (object->IsFixedArray()) {
2993      // We loop over fixed arrays ourselves, rather than using the visitor,
2994 // because the visitor doesn't support the start/offset iteration
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002995 // needed for IsRegionDirty.
Steve Blocka7e24c12009-10-30 11:49:00 +00002996 FixedArray* array = FixedArray::cast(object);
2997 for (int j = 0; j < array->length(); j++) {
2998 Object* element = array->get(j);
2999 if (element->IsHeapObject()) {
3000 HeapObject* element_object = HeapObject::cast(element);
3001 ASSERT(Heap::Contains(element_object));
3002 ASSERT(element_object->map()->IsMap());
3003 if (Heap::InNewSpace(element_object)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003004 Address array_addr = object->address();
3005 Address element_addr = array_addr + FixedArray::kHeaderSize +
3006 j * kPointerSize;
3007
3008 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00003009 }
3010 }
3011 }
3012 }
3013 }
3014}
3015
3016
3017void LargeObjectSpace::Print() {
3018 LargeObjectIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00003019 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
3020 obj->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003021 }
3022}
3023
3024
3025void LargeObjectSpace::ReportStatistics() {
Ben Murdochf87a2032010-10-22 12:50:53 +01003026 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003027 int num_objects = 0;
3028 ClearHistograms();
3029 LargeObjectIterator it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00003030 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003031 num_objects++;
Leon Clarked91b9f72010-01-27 17:25:45 +00003032 CollectHistogramInfo(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00003033 }
3034
3035 PrintF(" number of objects %d\n", num_objects);
3036 if (num_objects > 0) ReportHistogram(false);
3037}
3038
3039
3040void LargeObjectSpace::CollectCodeStatistics() {
3041 LargeObjectIterator obj_it(this);
Leon Clarked91b9f72010-01-27 17:25:45 +00003042 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003043 if (obj->IsCode()) {
3044 Code* code = Code::cast(obj);
3045 code_kind_statistics[code->kind()] += code->Size();
3046 }
3047 }
3048}
Steve Blocka7e24c12009-10-30 11:49:00 +00003049#endif // DEBUG
3050
3051} } // namespace v8::internal