// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
#include "global-handles.h"
#include "jsregexp.h"
#include "mark-compact.h"
#include "natives.h"
#include "scanner.h"
#include "scopeinfo.h"
#include "v8threads.h"

namespace v8 { namespace internal {

#define ROOT_ALLOCATION(type, name) type* Heap::name##_;
  ROOT_LIST(ROOT_ALLOCATION)
#undef ROOT_ALLOCATION


#define STRUCT_ALLOCATION(NAME, Name, name) Map* Heap::name##_map_;
  STRUCT_LIST(STRUCT_ALLOCATION)
#undef STRUCT_ALLOCATION


#define SYMBOL_ALLOCATION(name, string) String* Heap::name##_;
  SYMBOL_LIST(SYMBOL_ALLOCATION)
#undef SYMBOL_ALLOCATION


NewSpace* Heap::new_space_ = NULL;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

int Heap::promoted_space_limit_ = 0;
int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
int Heap::semispace_size_ = 1*MB;
int Heap::old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 256*KB;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.

// Double the new space after this many scavenge collections.
int Heap::new_space_growth_limit_ = 8;
int Heap::scavenge_count_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG


int Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_->Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity();
}


int Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_->Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available();
}


bool Heap::HasBeenSetup() {
  return new_space_ != NULL &&
         old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         lo_space_ != NULL;
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
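  // (Added note: PromotedExternalMemorySize() covers memory that lives
  // outside the V8 heap but has been reported to it by the embedder; it
  // counts toward this limit even though it is not stored in any V8 space.)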
  if (PromotedSpaceSize() + PromotedExternalMemorySize()
      > promoted_space_limit_) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion.  It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_->Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_->CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_->ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_->CollectStatistics();
    new_space_->ReportStatistics();
    new_space_->ClearHistograms();
  }
#endif
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_->ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  RegExpImpl::NewSpaceCollectionPrologue();
  gc_count_++;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();

  if (FLAG_print_rset) {
    // Not all spaces have remembered set bits that we care about.
    old_pointer_space_->PrintRSet();
    map_space_->PrintRSet();
    lo_space_->PrintRSet();
  }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}

int Heap::SizeOfObjects() {
  int total = 0;
  AllSpaces spaces;
  while (Space* space = spaces.next()) total += space->Size();
  return total;
}

void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(SizeOfObjects());

  SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table_);
  Counters::symbol_table_capacity.Set(symbol_table->Capacity());
  Counters::number_of_symbols.Set(symbol_table->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
}


void Heap::CollectAllGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  CollectGarbage(0, OLD_POINTER_SPACE);
}


bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection.  The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    GarbageCollector collector = SelectGarbageCollector(space);
    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    StatsRate* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    PerformGarbageCollection(space, collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  switch (space) {
    case NEW_SPACE:
      return new_space_->Available() >= requested_size;
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Available() >= requested_size;
    case OLD_DATA_SPACE:
      return old_data_space_->Available() >= requested_size;
    case CODE_SPACE:
      return code_space_->Available() >= requested_size;
    case MAP_SPACE:
      return map_space_->Available() >= requested_size;
    case LO_SPACE:
      return lo_space_->Available() >= requested_size;
  }
  return false;
}


void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
}


void Heap::PerformGarbageCollection(AllocationSpace space,
                                    GarbageCollector collector,
                                    GCTracer* tracer) {
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    global_gc_prologue_callback_();
  }

  if (collector == MARK_COMPACTOR) {
    MarkCompact(tracer);

    int promoted_space_size = PromotedSpaceSize();
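    // (Added note: the expression below lets the promoted space grow by
    // another 35% of its current size, but by at least 2 MB, before promoted
    // data alone triggers the next mark-compact collection.)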
    promoted_space_limit_ =
        promoted_space_size + Max(2 * MB, (promoted_space_size/100) * 35);
    old_gen_exhausted_ = false;

    // If we have used the mark-compact collector to collect the new
    // space, and it has not compacted the new space, we force a
    // separate scavenge collection.  This is a hack.  It covers the
    // case where (1) a new space collection was requested, (2) the
    // collector selection policy selected the mark-compact collector,
    // and (3) the mark-compact collector policy selected not to
    // compact the new space.  In that case, there is no more (usable)
    // free space in the new space after the collection compared to
    // before.
    if (space == NEW_SPACE && !MarkCompactCollector::HasCompacted()) {
      Scavenge();
    }
  } else {
    Scavenge();
  }
  Counters::objs_since_last_young.Set(0);

  // Process weak handles post gc.
  GlobalHandles::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    global_gc_epilogue_callback_();
  }
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  mc_count_++;
  tracer->set_full_gc_count(mc_count_);
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactPrologue();

  MarkCompactCollector::CollectGarbage(tracer);

  MarkCompactEpilogue();

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);
}


void Heap::MarkCompactPrologue() {
  CompilationCache::MarkCompactPrologue();
  RegExpImpl::OldSpaceCollectionPrologue();
  Top::MarkCompactPrologue();
  ThreadManager::MarkCompactPrologue();
}


void Heap::MarkCompactEpilogue() {
  Top::MarkCompactEpilogue();
  ThreadManager::MarkCompactEpilogue();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = code_space_->FindObject(a);
  if (obj->IsFailure()) {
    obj = lo_space_->FindObject(a);
  }
  ASSERT(!obj->IsFailure());
  return obj;
}


// Helper class for copying HeapObjects
class CopyVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) {
    CopyObject(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) CopyObject(p);
  }

 private:
  void CopyObject(Object** p) {
    if (!Heap::InFromSpace(*p)) return;
    Heap::CopyObject(reinterpret_cast<HeapObject**>(p));
  }
};


// Shared state read by the scavenge collector and set by CopyObject.
static Address promoted_top = NULL;


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};
#endif

void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) {
    VerifyNonPointerSpacePointersVisitor v;
    HeapObjectIterator it(code_space_);
    while (it.has_next()) {
      HeapObject* object = it.next();
      if (object->IsCode()) {
        Code::cast(object)->ConvertICTargetsFromAddressToObject();
      }
      object->Iterate(&v);
      if (object->IsCode()) {
        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
      }
    }
  }
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  scavenge_count_++;
  if (new_space_->Capacity() < new_space_->MaximumCapacity() &&
      scavenge_count_ > new_space_growth_limit_) {
    // Double the size of the new space, and double the limit.  The next
    // doubling attempt will occur after the current new_space_growth_limit_
    // more collections.
    // TODO(1240712): NewSpace::Double has a return value which is
    // ignored here.
    new_space_->Double();
    new_space_growth_limit_ *= 2;
  }

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_->Flip();
  new_space_->ResetAllocationInfo();

  // We need to sweep newly copied objects which can be in either the to space
  // or the old space.  For to space objects, we use a mark.  Newly copied
  // objects lie between the mark and the allocation top.  For objects
  // promoted to old space, we write their addresses downward from the top of
  // the new space.  Sweeping newly promoted objects requires an allocation
  // pointer and a mark.  Note that the allocation pointer 'top' actually
  // moves downward from the high address in the to space.
  //
  // There is guaranteed to be enough room at the top of the to space for the
  // addresses of promoted objects: every object promoted frees up its size in
  // bytes from the top of the new space, and objects are at least one pointer
  // in size.  Using the new space to record promoted addresses makes the
  // scavenge collector agnostic to the allocation strategy (eg, linear or
  // free-list) used in old space.
  Address new_mark = new_space_->ToSpaceLow();
  Address promoted_mark = new_space_->ToSpaceHigh();
  promoted_top = new_space_->ToSpaceHigh();
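
  // (Added note: rough picture of the to space during the scavenge, low
  // addresses on the left:
  //
  //   ToSpaceLow()                                          ToSpaceHigh()
  //   | copied objects ... | free ... | addresses of promoted objects |
  //   ^new_mark        top^           ^promoted_top     promoted_mark^
  //
  // new_mark chases the allocation top upward while promoted_mark chases
  // promoted_top downward; the transitive copy is complete when both
  // catch up.)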

  CopyVisitor copy_visitor;
  // Copy roots.
  IterateRoots(&copy_visitor);

  // Copy objects reachable from the old generation.  By definition, there
  // are no intergenerational pointers in code or data spaces.
  IterateRSet(old_pointer_space_, &CopyObject);
  IterateRSet(map_space_, &CopyObject);
  lo_space_->IterateRSet(&CopyObject);

  bool has_processed_weak_pointers = false;

  while (true) {
    ASSERT(new_mark <= new_space_->top());
    ASSERT(promoted_mark >= promoted_top);

    // Copy objects reachable from newly copied objects.
    while (new_mark < new_space_->top() || promoted_mark > promoted_top) {
      // Sweep newly copied objects in the to space.  The allocation pointer
      // can change during sweeping.
      Address previous_top = new_space_->top();
      SemiSpaceIterator new_it(new_space_, new_mark);
      while (new_it.has_next()) {
        new_it.next()->Iterate(&copy_visitor);
      }
      new_mark = previous_top;

      // Sweep newly copied objects in the old space.  The promotion 'top'
      // pointer could change during sweeping.
      previous_top = promoted_top;
      for (Address current = promoted_mark - kPointerSize;
           current >= previous_top;
           current -= kPointerSize) {
        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
        object->Iterate(&copy_visitor);
        UpdateRSet(object);
      }
      promoted_mark = previous_top;
    }

    if (has_processed_weak_pointers) break;  // We are done.
    // Copy objects reachable from weak pointers.
    GlobalHandles::IterateWeakRoots(&copy_visitor);
    has_processed_weak_pointers = true;
  }

  // Set age mark.
  new_space_->set_age_mark(new_mark);

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


void Heap::ClearRSetRange(Address start, int size_in_bytes) {
  uint32_t start_bit;
  Address start_word_address =
      Page::ComputeRSetBitPosition(start, 0, &start_bit);
  uint32_t end_bit;
  Address end_word_address =
      Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
                                   0,
                                   &end_bit);

  // We want to clear the bits in the starting word starting with the
  // first bit, and in the ending word up to and including the last
  // bit.  Build a pair of bitmasks to do that.
  uint32_t start_bitmask = start_bit - 1;
  uint32_t end_bitmask = ~((end_bit << 1) - 1);
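
  // (Added worked example; the arithmetic above implies start_bit and
  // end_bit are one-hot masks.  If start_bit == 1 << 3, then start_bitmask
  // == 0x00000007, so the AND below clears bits 3..31 of the first word.
  // If end_bit == 1 << 5, then end_bitmask == ~0x0000003f, which clears
  // bits 0..5, i.e. up to and including the last bit, of the ending word.)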

  // If the start address and end address are the same, we mask that
  // word once, otherwise mask the starting and ending word
  // separately and all the ones in between.
  if (start_word_address == end_word_address) {
    Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
  } else {
    Memory::uint32_at(start_word_address) &= start_bitmask;
    Memory::uint32_at(end_word_address) &= end_bitmask;
    start_word_address += kIntSize;
    memset(start_word_address, 0, end_word_address - start_word_address);
  }
}


class UpdateRSetVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) {
    UpdateRSet(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update a store into slots [start, end), used (a) to update remembered
    // set when promoting a young object to old space or (b) to rebuild
    // remembered sets after a mark-compact collection.
    for (Object** p = start; p < end; p++) UpdateRSet(p);
  }
 private:

  void UpdateRSet(Object** p) {
    // The remembered set should not be set.  It should be clear for objects
    // newly copied to old space, and it is cleared before rebuilding in the
    // mark-compact collector.
    ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
    if (Heap::InNewSpace(*p)) {
      Page::SetRSet(reinterpret_cast<Address>(p), 0);
    }
  }
};


int Heap::UpdateRSet(HeapObject* obj) {
  ASSERT(!InNewSpace(obj));
  // Special handling of fixed arrays to iterate the body based on the start
  // address and offset.  Just iterating the pointers as in UpdateRSetVisitor
  // will not work because Page::SetRSet needs to have the start of the
  // object.
  if (obj->IsFixedArray()) {
    FixedArray* array = FixedArray::cast(obj);
    int length = array->length();
    for (int i = 0; i < length; i++) {
      int offset = FixedArray::kHeaderSize + i * kPointerSize;
      ASSERT(!Page::IsRSetSet(obj->address(), offset));
      if (Heap::InNewSpace(array->get(i))) {
        Page::SetRSet(obj->address(), offset);
      }
    }
  } else if (!obj->IsCode()) {
    // Skip code object, we know it does not contain inter-generational
    // pointers.
    UpdateRSetVisitor v;
    obj->Iterate(&v);
  }
  return obj->Size();
}


void Heap::RebuildRSets() {
  // By definition, we do not care about remembered set bits in code or data
  // spaces.
  map_space_->ClearRSet();
  RebuildRSets(map_space_);

  old_pointer_space_->ClearRSet();
  RebuildRSets(old_pointer_space_);

  Heap::lo_space_->ClearRSet();
  RebuildRSets(lo_space_);
}


void Heap::RebuildRSets(PagedSpace* space) {
  HeapObjectIterator it(space);
  while (it.has_next()) Heap::UpdateRSet(it.next());
}


void Heap::RebuildRSets(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  while (it.has_next()) Heap::UpdateRSet(it.next());
}


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::RecordCopiedObject(HeapObject* obj) {
  bool should_record = false;
#ifdef DEBUG
  should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  should_record = should_record || FLAG_log_gc;
#endif
  if (should_record) {
    if (new_space_->Contains(obj)) {
      new_space_->RecordAllocation(obj);
    } else {
      new_space_->RecordPromotion(obj);
    }
  }
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


HeapObject* Heap::MigrateObject(HeapObject** source_p,
                                HeapObject* target,
                                int size) {
  void** src = reinterpret_cast<void**>((*source_p)->address());
  void** dst = reinterpret_cast<void**>(target->address());
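  // (Added note: this do-while executes exactly size/kPointerSize times;
  // counter starts at n - 1 and the body runs once before each test.  It
  // assumes size is a nonzero multiple of kPointerSize, which holds for all
  // heap object sizes.)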
  int counter = size/kPointerSize - 1;
  do {
    *dst++ = *src++;
  } while (counter-- > 0);

  // Set the forwarding address.
  (*source_p)->set_map_word(MapWord::FromForwardingAddress(target));

  // Update NewSpace stats if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  RecordCopiedObject(target);
#endif

  return target;
}


void Heap::CopyObject(HeapObject** p) {
  ASSERT(InFromSpace(*p));

  HeapObject* object = *p;

  // We use the first word (where the map pointer usually is) of a heap
  // object to record the forwarding pointer.  A forwarding pointer can
  // point to an old space, the code space, or the to space of the new
  // generation.
  MapWord first_word = object->map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    *p = first_word.ToForwardingAddress();
    return;
  }

  // Optimization: Bypass ConsString objects where the right-hand side is
  // Heap::empty_string().  We do not use object->IsConsString because we
  // already know that object has the heap object tag.
  InstanceType type = first_word.ToMap()->instance_type();
  if (type < FIRST_NONSTRING_TYPE &&
      String::cast(object)->representation_tag() == kConsStringTag &&
      ConsString::cast(object)->second() == Heap::empty_string()) {
    object = HeapObject::cast(ConsString::cast(object)->first());
    *p = object;
    // After patching *p we have to repeat the checks that object is in the
    // active semispace of the young generation and not already copied.
    if (!InFromSpace(object)) return;
    first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      *p = first_word.ToForwardingAddress();
      return;
    }
    type = first_word.ToMap()->instance_type();
  }

  int object_size = object->SizeFromMap(first_word.ToMap());
  Object* result;
  // If the object should be promoted, we try to copy it to old space.
  if (ShouldBePromoted(object->address(), object_size)) {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space_ ||
           target_space == Heap::old_data_space_);
    result = target_space->AllocateRaw(object_size);

    if (!result->IsFailure()) {
      *p = MigrateObject(p, HeapObject::cast(result), object_size);
      if (target_space == Heap::old_pointer_space_) {
        // Record the object's address at the top of the to space, to allow
        // it to be swept by the scavenger.
        promoted_top -= kPointerSize;
        Memory::Object_at(promoted_top) = *p;
      } else {
#ifdef DEBUG
        // Objects promoted to the data space should not have pointers to
        // new space.
        VerifyNonPointerSpacePointersVisitor v;
        (*p)->Iterate(&v);
#endif
      }
      return;
    }
  }

  // The object should remain in new space or the old space allocation failed.
  result = new_space_->AllocateRaw(object_size);
  // Failed allocation at this point is utterly unexpected.
  ASSERT(!result->IsFailure());
  *p = MigrateObject(p, HeapObject::cast(result), object_size);
}


Object* Heap::AllocatePartialMap(InstanceType instance_type,
                                 int instance_size) {
  Object* result = AllocateRawMap(Map::kSize);
  if (result->IsFailure()) return result;

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  return result;
}


Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
  Object* result = AllocateRawMap(Map::kSize);
  if (result->IsFailure()) return result;

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map(meta_map());
  map->set_instance_type(instance_type);
  map->set_prototype(null_value());
  map->set_constructor(null_value());
  map->set_instance_size(instance_size);
  map->set_instance_descriptors(empty_descriptor_array());
  map->set_code_cache(empty_fixed_array());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  return map;
}


bool Heap::CreateInitialMaps() {
  Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
  if (obj->IsFailure()) return false;

  // Map::cast cannot be used due to uninitialized map field.
  meta_map_ = reinterpret_cast<Map*>(obj);
  meta_map()->set_map(meta_map());
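  // (Added note: the meta map is its own map.  Every map's first word points
  // to the map that describes it, and for the map of all maps that is the
  // map itself; this self-reference is what closes the bootstrap cycle.)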

  obj = AllocatePartialMap(FIXED_ARRAY_TYPE, Array::kHeaderSize);
  if (obj->IsFailure()) return false;
  fixed_array_map_ = Map::cast(obj);

  obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
  if (obj->IsFailure()) return false;
  oddball_map_ = Map::cast(obj);

  // Allocate the empty array
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  empty_fixed_array_ = FixedArray::cast(obj);

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  null_value_ = obj;

  // Allocate the empty descriptor array.  AllocateMap can now be used.
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  // There is a check against empty_descriptor_array() in cast().
  empty_descriptor_array_ = reinterpret_cast<DescriptorArray*>(obj);

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_instance_descriptors(empty_descriptor_array());
  meta_map()->set_code_cache(empty_fixed_array());

  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
  fixed_array_map()->set_code_cache(empty_fixed_array());

  oddball_map()->set_instance_descriptors(empty_descriptor_array());
  oddball_map()->set_code_cache(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());
  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
  if (obj->IsFailure()) return false;
  heap_number_map_ = Map::cast(obj);

  obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
  if (obj->IsFailure()) return false;
  proxy_map_ = Map::cast(obj);

#define ALLOCATE_STRING_MAP(type, size, name)  \
  obj = AllocateMap(type, size);               \
  if (obj->IsFailure()) return false;          \
  name##_map_ = Map::cast(obj);
  STRING_TYPE_LIST(ALLOCATE_STRING_MAP);
#undef ALLOCATE_STRING_MAP

  obj = AllocateMap(SHORT_STRING_TYPE, TwoByteString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_short_string_map_ = Map::cast(obj);
  undetectable_short_string_map_->set_is_undetectable();

  obj = AllocateMap(MEDIUM_STRING_TYPE, TwoByteString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_medium_string_map_ = Map::cast(obj);
  undetectable_medium_string_map_->set_is_undetectable();

  obj = AllocateMap(LONG_STRING_TYPE, TwoByteString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_long_string_map_ = Map::cast(obj);
  undetectable_long_string_map_->set_is_undetectable();

  obj = AllocateMap(SHORT_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_short_ascii_string_map_ = Map::cast(obj);
  undetectable_short_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_medium_ascii_string_map_ = Map::cast(obj);
  undetectable_medium_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(LONG_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_long_ascii_string_map_ = Map::cast(obj);
  undetectable_long_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kHeaderSize);
  if (obj->IsFailure()) return false;
  byte_array_map_ = Map::cast(obj);

  obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
  if (obj->IsFailure()) return false;
  code_map_ = Map::cast(obj);

  obj = AllocateMap(FILLER_TYPE, kPointerSize);
  if (obj->IsFailure()) return false;
  one_word_filler_map_ = Map::cast(obj);

  obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
  if (obj->IsFailure()) return false;
  two_word_filler_map_ = Map::cast(obj);

#define ALLOCATE_STRUCT_MAP(NAME, Name, name)  \
  obj = AllocateMap(NAME##_TYPE, Name::kSize); \
  if (obj->IsFailure()) return false;          \
  name##_map_ = Map::cast(obj);
  STRUCT_LIST(ALLOCATE_STRUCT_MAP)
#undef ALLOCATE_STRUCT_MAP

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kSize);
  if (obj->IsFailure()) return false;
  hash_table_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kSize);
  if (obj->IsFailure()) return false;
  context_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kSize);
  if (obj->IsFailure()) return false;
  global_context_map_ = Map::cast(obj);

  obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
  if (obj->IsFailure()) return false;
  boilerplate_function_map_ = Map::cast(obj);

  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
  if (obj->IsFailure()) return false;
  shared_function_info_map_ = Map::cast(obj);

  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
  return true;
}


Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result = AllocateRaw(HeapNumber::kSize, space);
  if (result->IsFailure()) return result;

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


Object* Heap::AllocateHeapNumber(double value) {
  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result = new_space_->AllocateRaw(HeapNumber::kSize);
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


Object* Heap::CreateOddball(Map* map,
                            const char* to_string,
                            Object* to_number) {
  Object* result = Allocate(map, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;
  return Oddball::cast(result)->Initialize(to_string, to_number);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  neander_map_ = Map::cast(obj);

  obj = Heap::AllocateJSObjectFromMap(neander_map_);
  if (obj->IsFailure()) return false;
  Object* elements = AllocateFixedArray(2);
  if (elements->IsFailure()) return false;
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  message_listeners_ = JSObject::cast(obj);

  obj = Heap::AllocateJSObjectFromMap(neander_map_);
  if (obj->IsFailure()) return false;
  elements = AllocateFixedArray(2);
  if (elements->IsFailure()) return false;
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  debug_event_listeners_ = JSObject::cast(obj);

  return true;
}

void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs.  They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope;
  {
    CEntryStub stub;
    c_entry_code_ = *stub.GetCode();
  }
  {
    CEntryDebugBreakStub stub;
    c_entry_debug_break_code_ = *stub.GetCode();
  }
  {
    JSEntryStub stub;
    js_entry_code_ = *stub.GetCode();
  }
  {
    JSConstructEntryStub stub;
    js_construct_entry_code_ = *stub.GetCode();
  }
}


bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  obj = AllocateHeapNumber(-0.0, TENURED);
  if (obj->IsFailure()) return false;
  minus_zero_value_ = obj;
  ASSERT(signbit(minus_zero_value_->Number()) != 0);

  obj = AllocateHeapNumber(OS::nan_value(), TENURED);
  if (obj->IsFailure()) return false;
  nan_value_ = obj;

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  undefined_value_ = obj;
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate initial symbol table.
  obj = SymbolTable::Allocate(kInitialSymbolTableSize);
  if (obj->IsFailure()) return false;
  symbol_table_ = obj;

  // Assign the print strings for oddballs after creating symboltable.
  Object* symbol = LookupAsciiSymbol("undefined");
  if (symbol->IsFailure()) return false;
  Oddball::cast(undefined_value_)->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value_)->set_to_number(nan_value_);

  // Assign the print strings for oddballs after creating symboltable.
  symbol = LookupAsciiSymbol("null");
  if (symbol->IsFailure()) return false;
  Oddball::cast(null_value_)->set_to_string(String::cast(symbol));
  Oddball::cast(null_value_)->set_to_number(Smi::FromInt(0));

  // Allocate the null_value
  obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
  if (obj->IsFailure()) return false;

  obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
  if (obj->IsFailure()) return false;
  true_value_ = obj;

  obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
  if (obj->IsFailure()) return false;
  false_value_ = obj;

  obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
  if (obj->IsFailure()) return false;
  the_hole_value_ = obj;

  // Allocate the empty string.
  obj = AllocateRawAsciiString(0, TENURED);
  if (obj->IsFailure()) return false;
  empty_string_ = String::cast(obj);

#define SYMBOL_INITIALIZE(name, string) \
  obj = LookupAsciiSymbol(string);      \
  if (obj->IsFailure()) return false;   \
  (name##_) = String::cast(obj);
  SYMBOL_LIST(SYMBOL_INITIALIZE)
#undef SYMBOL_INITIALIZE

  // Allocate the proxy for __proto__.
  obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
  if (obj->IsFailure()) return false;
  prototype_accessors_ = Proxy::cast(obj);

  // Allocate the code_stubs dictionary.
  obj = Dictionary::Allocate(4);
  if (obj->IsFailure()) return false;
  code_stubs_ = Dictionary::cast(obj);

  // Allocate the non_monomorphic_cache used in stub-cache.cc
  obj = Dictionary::Allocate(4);
  if (obj->IsFailure()) return false;
  non_monomorphic_cache_ = Dictionary::cast(obj);

  CreateFixedStubs();

  // Allocate the number->string conversion cache
  obj = AllocateFixedArray(kNumberStringCacheSize * 2);
  if (obj->IsFailure()) return false;
  number_string_cache_ = FixedArray::cast(obj);

  // Allocate cache for single character strings.
  obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
  if (obj->IsFailure()) return false;
  single_character_string_cache_ = FixedArray::cast(obj);

  // Allocate cache for external strings pointing to native source code.
  obj = AllocateFixedArray(Natives::GetBuiltinsCount());
  if (obj->IsFailure()) return false;
  natives_source_cache_ = FixedArray::cast(obj);

  // Initialize compilation cache.
  CompilationCache::Clear();

  return true;
}

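// (Added note: hash a double by XOR-folding the two 32-bit halves of its
// bit pattern; masking with (kNumberStringCacheSize - 1) acts as a cheap
// modulo, which relies on the cache size being a power of two.)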
static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return ((static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) &
          (Heap::kNumberStringCacheSize - 1));
}


static inline int smi_get_hash(Smi* smi) {
  return (smi->value() & (Heap::kNumberStringCacheSize - 1));
}

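// (Added note: the number->string cache is a direct-mapped table of
// (key, value) pairs flattened into one FixedArray: the number for hash h
// is stored at index 2 * h and the cached string at index 2 * h + 1.)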
Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number));
  } else {
    hash = double_get_hash(number->Number());
  }
  Object* key = number_string_cache_->get(hash * 2);
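  // (Added note: a Smi key is matched with a plain pointer comparison below,
  // since equal Smis are the same tagged word; heap numbers are distinct
  // objects and have to be compared by numeric value instead.)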
  if (key == number) {
    return String::cast(number_string_cache_->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache_->get(hash * 2 + 1));
  }
  return undefined_value();
}


void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number));
    number_string_cache_->set(hash * 2, number, FixedArray::SKIP_WRITE_BARRIER);
  } else {
    hash = double_get_hash(number->Number());
    number_string_cache_->set(hash * 2, number);
  }
  number_string_cache_->set(hash * 2 + 1, string);
}


Object* Heap::SmiOrNumberFromDouble(double value,
                                    bool new_object,
                                    PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int.  Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
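  // (Added example: -0.0 has only the sign bit set, 0x8000000000000000,
  // while 0.0 is all zeros, so their bit patterns differ even though
  // -0.0 == 0.0 compares true.  Comparing bits also matches the canonical
  // NaN produced by OS::nan_value(), which an ordinary == never could,
  // since NaN != NaN.)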
  static const DoubleRepresentation plus_zero(0.0);
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation nan(OS::nan_value());
  ASSERT(minus_zero_value_ != NULL);
  ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));

  DoubleRepresentation rep(value);
  if (rep.bits == plus_zero.bits) return Smi::FromInt(0);  // not uncommon
  if (rep.bits == minus_zero.bits) {
    return new_object ? AllocateHeapNumber(-0.0, pretenure)
                      : minus_zero_value_;
  }
  if (rep.bits == nan.bits) {
    return new_object
        ? AllocateHeapNumber(OS::nan_value(), pretenure)
        : nan_value_;
  }

  // Try to represent the value as a tagged small integer.
  int int_value = FastD2I(value);
  if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}


Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
  return SmiOrNumberFromDouble(value,
                               true /* number object must be new */,
                               pretenure);
}


Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
  return SmiOrNumberFromDouble(value,
                               false /* use preallocated NaN, -0.0 */,
                               pretenure);
}


Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate proxies in paged spaces.
  STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result = Allocate(proxy_map(), space);
  if (result->IsFailure()) return result;

  Proxy::cast(result)->set_proxy(proxy);
  return result;
}


Object* Heap::AllocateSharedFunctionInfo(Object* name) {
  Object* result = Allocate(shared_function_info_map(), NEW_SPACE);
  if (result->IsFailure()) return result;

  SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
  share->set_name(name);
  Code* illegal = Builtins::builtin(Builtins::Illegal);
  share->set_code(illegal);
  share->set_expected_nof_properties(0);
  share->set_length(0);
  share->set_formal_parameter_count(0);
  share->set_instance_class_name(Object_symbol());
  share->set_function_data(undefined_value());
  share->set_lazy_load_data(undefined_value());
  share->set_script(undefined_value());
  share->set_start_position_and_type(0);
  share->set_debug_info(undefined_value());
  return result;
}


Object* Heap::AllocateConsString(String* first, String* second) {
  int length = first->length() + second->length();
  bool is_ascii = first->is_ascii() && second->is_ascii();

  // If the resulting string is small make a flat string.
  if (length < ConsString::kMinLength) {
    Object* result = is_ascii
        ? AllocateRawAsciiString(length)
        : AllocateRawTwoByteString(length);
    if (result->IsFailure()) return result;
    // Copy the characters into the new object.
    String* string_result = String::cast(result);
    int first_length = first->length();
    // Copy the content of the first string.
    for (int i = 0; i < first_length; i++) {
      string_result->Set(i, first->Get(i));
    }
    int second_length = second->length();
    // Copy the content of the second string.
    for (int i = 0; i < second_length; i++) {
      string_result->Set(first_length + i, second->Get(i));
    }
    return result;
  }

  Map* map;
  if (length <= String::kMaxShortStringSize) {
    map = is_ascii ? short_cons_ascii_string_map()
                   : short_cons_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = is_ascii ? medium_cons_ascii_string_map()
                   : medium_cons_string_map();
  } else {
    map = is_ascii ? long_cons_ascii_string_map()
                   : long_cons_string_map();
  }
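  // (Added note: the length class, short/medium/long, and the character
  // width are both encoded in the chosen map, so the cons string itself
  // carries no extra tag bits for them.)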
1355
1356 Object* result = Allocate(map, NEW_SPACE);
1357 if (result->IsFailure()) return result;
1358
1359 ConsString* cons_string = ConsString::cast(result);
1360 cons_string->set_first(first);
1361 cons_string->set_second(second);
1362 cons_string->set_length(length);
1363
1364 return result;
1365}
1366
1367
1368Object* Heap::AllocateSlicedString(String* buffer, int start, int end) {
1369 int length = end - start;
1370
1371 // If the resulting string is small make a sub string.
1372 if (end - start <= SlicedString::kMinLength) {
1373 return Heap::AllocateSubString(buffer, start, end);
1374 }
1375
1376 Map* map;
1377 if (length <= String::kMaxShortStringSize) {
1378 map = buffer->is_ascii() ? short_sliced_ascii_string_map()
1379 : short_sliced_string_map();
1380 } else if (length <= String::kMaxMediumStringSize) {
1381 map = buffer->is_ascii() ? medium_sliced_ascii_string_map()
1382 : medium_sliced_string_map();
1383 } else {
1384 map = buffer->is_ascii() ? long_sliced_ascii_string_map()
1385 : long_sliced_string_map();
1386 }
1387
1388 Object* result = Allocate(map, NEW_SPACE);
1389 if (result->IsFailure()) return result;
1390
1391 SlicedString* sliced_string = SlicedString::cast(result);
1392 sliced_string->set_buffer(buffer);
1393 sliced_string->set_start(start);
1394 sliced_string->set_length(length);
1395
1396 return result;
1397}
1398
1399
1400Object* Heap::AllocateSubString(String* buffer, int start, int end) {
1401 int length = end - start;
1402
1403 // Make an attempt to flatten the buffer to reduce access time.
1404 buffer->TryFlatten();
1405
1406 Object* result = buffer->is_ascii()
1407 ? AllocateRawAsciiString(length)
1408 : AllocateRawTwoByteString(length);
1409 if (result->IsFailure()) return result;
1410
1411 // Copy the characters into the new object.
1412 String* string_result = String::cast(result);
1413 for (int i = 0; i < length; i++) {
1414 string_result->Set(i, buffer->Get(start + i));
1415 }
1416 return result;
1417}
1418
1419
1420Object* Heap::AllocateExternalStringFromAscii(
1421 ExternalAsciiString::Resource* resource) {
1422 Map* map;
1423 int length = resource->length();
1424 if (length <= String::kMaxShortStringSize) {
1425 map = short_external_ascii_string_map();
1426 } else if (length <= String::kMaxMediumStringSize) {
1427 map = medium_external_ascii_string_map();
1428 } else {
1429 map = long_external_ascii_string_map();
1430 }
1431
1432 Object* result = Allocate(map, NEW_SPACE);
1433 if (result->IsFailure()) return result;
1434
1435 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
1436 external_string->set_length(length);
1437 external_string->set_resource(resource);
1438
1439 return result;
1440}
1441
1442
1443Object* Heap::AllocateExternalStringFromTwoByte(
1444 ExternalTwoByteString::Resource* resource) {
1445 Map* map;
1446 int length = resource->length();
1447 if (length <= String::kMaxShortStringSize) {
1448 map = short_external_string_map();
1449 } else if (length <= String::kMaxMediumStringSize) {
1450 map = medium_external_string_map();
1451 } else {
1452 map = long_external_string_map();
1453 }
1454
1455 Object* result = Allocate(map, NEW_SPACE);
1456 if (result->IsFailure()) return result;
1457
1458 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
1459 external_string->set_length(length);
1460 external_string->set_resource(resource);
1461
1462 return result;
1463}
1464
1465
1466Object* Heap:: LookupSingleCharacterStringFromCode(uint16_t code) {
1467 if (code <= String::kMaxAsciiCharCode) {
1468 Object* value = Heap::single_character_string_cache()->get(code);
1469 if (value != Heap::undefined_value()) return value;
1470 Object* result = Heap::AllocateRawAsciiString(1);
1471 if (result->IsFailure()) return result;
1472 String::cast(result)->Set(0, code);
1473 Heap::single_character_string_cache()->set(code, result);
1474 return result;
1475 }
1476 Object* result = Heap::AllocateRawTwoByteString(1);
1477 if (result->IsFailure()) return result;
1478 String::cast(result)->Set(0, code);
1479 return result;
1480}
1481
1482
1483Object* Heap::AllocateByteArray(int length) {
1484 int size = ByteArray::SizeFor(length);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001485 AllocationSpace space =
1486 size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001487
1488 Object* result = AllocateRaw(size, space);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001489
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001490 if (result->IsFailure()) return result;
1491
1492 reinterpret_cast<Array*>(result)->set_map(byte_array_map());
1493 reinterpret_cast<Array*>(result)->set_length(length);
1494 return result;
1495}
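
// Space-selection sketch (illustrative only, assuming MaxHeapObjectSize() is
// far below 4 MB): the size derived from the length picks the space.
//
//   Object* small = Heap::AllocateByteArray(64);      // fits in NEW_SPACE
//   Object* large = Heap::AllocateByteArray(4 * MB);  // goes to LO_SPACE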
1496
1497
1498Object* Heap::CreateCode(const CodeDesc& desc,
1499 ScopeInfo<>* sinfo,
1500 Code::Flags flags) {
1501 // Compute size
1502 int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment);
1503 int sinfo_size = 0;
1504 if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
1505 int obj_size = Code::SizeFor(body_size, sinfo_size);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001506 Object* result;
1507 if (obj_size > MaxHeapObjectSize()) {
1508 result = lo_space_->AllocateRawCode(obj_size);
1509 } else {
1510 result = code_space_->AllocateRaw(obj_size);
1511 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001512
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001513 if (result->IsFailure()) return result;
1514
1515 // Initialize the object
1516 HeapObject::cast(result)->set_map(code_map());
1517 Code* code = Code::cast(result);
1518 code->set_instruction_size(desc.instr_size);
1519 code->set_relocation_size(desc.reloc_size);
1520 code->set_sinfo_size(sinfo_size);
1521 code->set_flags(flags);
1522 code->set_ic_flag(Code::IC_TARGET_IS_ADDRESS);
1523  code->CopyFrom(desc);  // Migrate generated code.
1524  if (sinfo != NULL) sinfo->Serialize(code);  // Write scope info.
1525
1526#ifdef DEBUG
1527 code->Verify();
1528#endif
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001529 return code;
1530}
1531
1532
1533Object* Heap::CopyCode(Code* code) {
1534 // Allocate an object the same size as the code object.
1535 int obj_size = code->Size();
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001536 Object* result;
1537 if (obj_size > MaxHeapObjectSize()) {
1538 result = lo_space_->AllocateRawCode(obj_size);
1539 } else {
1540 result = code_space_->AllocateRaw(obj_size);
1541 }
1542
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001543 if (result->IsFailure()) return result;
1544
1545 // Copy code object.
1546 Address old_addr = code->address();
1547 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
1548 memcpy(new_addr, old_addr, obj_size);
1549
1550 // Relocate the copy.
1551 Code* new_code = Code::cast(result);
1552 new_code->Relocate(new_addr - old_addr);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001553 return new_code;
1554}
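
// Worked example of the relocation above (illustrative only): if the original
// code object lived at address 0x1000 and the copy at 0x3000, the delta
// new_addr - old_addr == 0x2000 is applied to every absolute address embedded
// in the copied instructions and relocation information, keeping the copy
// self-consistent.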
1555
1556
1557Object* Heap::Allocate(Map* map, AllocationSpace space) {
1558 ASSERT(gc_state_ == NOT_IN_GC);
1559 ASSERT(map->instance_type() != MAP_TYPE);
1560 Object* result = AllocateRaw(map->instance_size(), space);
1561 if (result->IsFailure()) return result;
1562 HeapObject::cast(result)->set_map(map);
1563 return result;
1564}
1565
1566
1567Object* Heap::InitializeFunction(JSFunction* function,
1568 SharedFunctionInfo* shared,
1569 Object* prototype) {
1570 ASSERT(!prototype->IsMap());
1571 function->initialize_properties();
1572 function->initialize_elements();
1573 function->set_shared(shared);
1574 function->set_prototype_or_initial_map(prototype);
1575 function->set_context(undefined_value());
1576 function->set_literals(empty_fixed_array());
1577 return function;
1578}
1579
1580
1581Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
1582 // Allocate the prototype.
1583 Object* prototype =
1584 AllocateJSObject(Top::context()->global_context()->object_function());
1585 if (prototype->IsFailure()) return prototype;
1586 // When creating the prototype for the function we must set its
1587 // constructor to the function.
1588 Object* result =
1589 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
1590 function,
1591 DONT_ENUM);
1592 if (result->IsFailure()) return result;
1593 return prototype;
1594}
1595
1596
1597Object* Heap::AllocateFunction(Map* function_map,
1598 SharedFunctionInfo* shared,
1599 Object* prototype) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001600 Object* result = Allocate(function_map, OLD_POINTER_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001601 if (result->IsFailure()) return result;
1602 return InitializeFunction(JSFunction::cast(result), shared, prototype);
1603}
1604
1605
1606Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
mads.s.ager@gmail.com9a4089a2008-09-01 08:55:01 +00001607 // To get fast allocation and map sharing for arguments objects we
1608 // allocate them based on an arguments boilerplate.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001609
1610 // This calls Copy directly rather than using Heap::AllocateRaw so we
1611 // duplicate the check here.
1612 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
1613
1614 JSObject* boilerplate =
1615 Top::context()->global_context()->arguments_boilerplate();
1616 Object* result = boilerplate->Copy();
1617 if (result->IsFailure()) return result;
1618
1619 Object* obj = JSObject::cast(result)->properties();
1620 FixedArray::cast(obj)->set(arguments_callee_index, callee);
1621 FixedArray::cast(obj)->set(arguments_length_index, Smi::FromInt(length));
1622
1623 // Allocate the fixed array.
1624 obj = Heap::AllocateFixedArray(length);
1625 if (obj->IsFailure()) return obj;
1626 JSObject::cast(result)->set_elements(FixedArray::cast(obj));
1627
1628 // Check the state of the object
1629 ASSERT(JSObject::cast(result)->HasFastProperties());
1630 ASSERT(JSObject::cast(result)->HasFastElements());
1631
1632 return result;
1633}
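
// Sketch (illustrative only; 'callee_fun' is a hypothetical JSFunction*):
// copying the boilerplate means only the two property slots and the elements
// array need to be filled in.
//
//   Object* args = Heap::AllocateArgumentsObject(callee_fun, 2);
//   if (args->IsFailure()) return args;
//   // args shares the boilerplate's map; its length property is 2.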
1634
1635
1636Object* Heap::AllocateInitialMap(JSFunction* fun) {
1637 ASSERT(!fun->has_initial_map());
1638
1639 // First create a new map.
1640 Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1641 if (map_obj->IsFailure()) return map_obj;
1642
1643 // Fetch or allocate prototype.
1644 Object* prototype;
1645 if (fun->has_instance_prototype()) {
1646 prototype = fun->instance_prototype();
1647 } else {
1648 prototype = AllocateFunctionPrototype(fun);
1649 if (prototype->IsFailure()) return prototype;
1650 }
1651 Map* map = Map::cast(map_obj);
1652 map->set_unused_property_fields(fun->shared()->expected_nof_properties());
1653 map->set_prototype(prototype);
1654 return map;
1655}
1656
1657
1658void Heap::InitializeJSObjectFromMap(JSObject* obj,
1659 FixedArray* properties,
1660 Map* map) {
1661 obj->set_properties(properties);
1662 obj->initialize_elements();
1663 // TODO(1240798): Initialize the object's body using valid initial values
1664 // according to the object's initial map. For example, if the map's
1665 // instance type is JS_ARRAY_TYPE, the length field should be initialized
1666 // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
1667 // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
1668  // verification code has to cope with (temporarily) invalid objects. See,
1669  // for example, JSArray::JSArrayVerify.
1670 obj->InitializeBody(map->instance_size());
1671}
1672
1673
1674Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
1675 // JSFunctions should be allocated using AllocateFunction to be
1676 // properly initialized.
1677 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
1678
1679 // Allocate the backing storage for the properties.
mads.s.ager@gmail.com9a4089a2008-09-01 08:55:01 +00001680 Object* properties = AllocateFixedArray(map->unused_property_fields());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001681 if (properties->IsFailure()) return properties;
1682
1683 // Allocate the JSObject.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001684 AllocationSpace space =
1685 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001686 if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE;
1687 Object* obj = Allocate(map, space);
1688 if (obj->IsFailure()) return obj;
1689
1690 // Initialize the JSObject.
1691 InitializeJSObjectFromMap(JSObject::cast(obj),
1692 FixedArray::cast(properties),
1693 map);
1694 return obj;
1695}
1696
1697
1698Object* Heap::AllocateJSObject(JSFunction* constructor,
1699 PretenureFlag pretenure) {
1700 // Allocate the initial map if absent.
1701 if (!constructor->has_initial_map()) {
1702 Object* initial_map = AllocateInitialMap(constructor);
1703 if (initial_map->IsFailure()) return initial_map;
1704 constructor->set_initial_map(Map::cast(initial_map));
1705 Map::cast(initial_map)->set_constructor(constructor);
1706 }
1707  // Allocate the object based on the constructor's initial map.
1708 return AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
1709}
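
// Usage sketch (illustrative only; 'constructor_fun' is hypothetical):
//
//   Object* obj = Heap::AllocateJSObject(constructor_fun, TENURED);
//   if (obj->IsFailure()) return obj;
//   // obj is laid out from the constructor's initial map and, being
//   // pretenured, lives in OLD_POINTER_SPACE (or LO_SPACE if oversized).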
1710
1711
1712Object* Heap::ReinitializeJSGlobalObject(JSFunction* constructor,
1713 JSGlobalObject* object) {
1714 // Allocate initial map if absent.
1715 if (!constructor->has_initial_map()) {
1716 Object* initial_map = AllocateInitialMap(constructor);
1717 if (initial_map->IsFailure()) return initial_map;
1718 constructor->set_initial_map(Map::cast(initial_map));
1719 Map::cast(initial_map)->set_constructor(constructor);
1720 }
1721
1722 Map* map = constructor->initial_map();
1723
1724 // Check that the already allocated object has the same size as
1725 // objects allocated using the constructor.
1726 ASSERT(map->instance_size() == object->map()->instance_size());
1727
1728 // Allocate the backing storage for the properties.
mads.s.ager@gmail.com9a4089a2008-09-01 08:55:01 +00001729 Object* properties = AllocateFixedArray(map->unused_property_fields());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001730 if (properties->IsFailure()) return properties;
1731
1732 // Reset the map for the object.
1733 object->set_map(constructor->initial_map());
1734
1735 // Reinitialize the object from the constructor map.
1736 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
1737 return object;
1738}
1739
1740
1741Object* Heap::AllocateStringFromAscii(Vector<const char> string,
1742 PretenureFlag pretenure) {
1743 Object* result = AllocateRawAsciiString(string.length(), pretenure);
1744 if (result->IsFailure()) return result;
1745
1746 // Copy the characters into the new object.
1747 AsciiString* string_result = AsciiString::cast(result);
1748 for (int i = 0; i < string.length(); i++) {
1749 string_result->AsciiStringSet(i, string[i]);
1750 }
1751 return result;
1752}
1753
1754
1755Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
1756 PretenureFlag pretenure) {
1757 // Count the number of characters in the UTF-8 string and check if
1758 // it is an ASCII string.
1759 Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
1760 decoder->Reset(string.start(), string.length());
1761 int chars = 0;
1762 bool is_ascii = true;
1763 while (decoder->has_more()) {
1764 uc32 r = decoder->GetNext();
1765 if (r > String::kMaxAsciiCharCode) is_ascii = false;
1766 chars++;
1767 }
1768
1769 // If the string is ascii, we do not need to convert the characters
1770 // since UTF8 is backwards compatible with ascii.
1771 if (is_ascii) return AllocateStringFromAscii(string, pretenure);
1772
1773 Object* result = AllocateRawTwoByteString(chars, pretenure);
1774 if (result->IsFailure()) return result;
1775
1776 // Convert and copy the characters into the new object.
1777 String* string_result = String::cast(result);
1778 decoder->Reset(string.start(), string.length());
1779 for (int i = 0; i < chars; i++) {
1780 uc32 r = decoder->GetNext();
1781 string_result->Set(i, r);
1782 }
1783 return result;
1784}
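
// Worked example of the two-pass decode above (illustrative only): for the
// 3-byte UTF-8 input 0x61 0xC3 0xA9 ("a" followed by U+00E9), the first pass
// counts chars == 2 and clears is_ascii, so a two-byte string of length 2 is
// allocated and the second pass stores U+0061 and U+00E9.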
1785
1786
1787Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
1788 PretenureFlag pretenure) {
1789 // Check if the string is an ASCII string.
1790 int i = 0;
1791 while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
1792
1793 Object* result;
1794 if (i == string.length()) { // It's an ASCII string.
1795 result = AllocateRawAsciiString(string.length(), pretenure);
1796 } else { // It's not an ASCII string.
1797 result = AllocateRawTwoByteString(string.length(), pretenure);
1798 }
1799 if (result->IsFailure()) return result;
1800
1801 // Copy the characters into the new object, which may be either ASCII or
1802 // UTF-16.
1803 String* string_result = String::cast(result);
1804 for (int i = 0; i < string.length(); i++) {
1805 string_result->Set(i, string[i]);
1806 }
1807 return result;
1808}
1809
1810
1811Map* Heap::SymbolMapForString(String* string) {
1812 // If the string is in new space it cannot be used as a symbol.
1813 if (InNewSpace(string)) return NULL;
1814
1815 // Find the corresponding symbol map for strings.
1816 Map* map = string->map();
1817
1818 if (map == short_ascii_string_map()) return short_ascii_symbol_map();
1819 if (map == medium_ascii_string_map()) return medium_ascii_symbol_map();
1820 if (map == long_ascii_string_map()) return long_ascii_symbol_map();
1821
1822 if (map == short_string_map()) return short_symbol_map();
1823 if (map == medium_string_map()) return medium_symbol_map();
1824 if (map == long_string_map()) return long_symbol_map();
1825
1826 if (map == short_cons_string_map()) return short_cons_symbol_map();
1827 if (map == medium_cons_string_map()) return medium_cons_symbol_map();
1828 if (map == long_cons_string_map()) return long_cons_symbol_map();
1829
1830 if (map == short_cons_ascii_string_map()) {
1831 return short_cons_ascii_symbol_map();
1832 }
1833 if (map == medium_cons_ascii_string_map()) {
1834 return medium_cons_ascii_symbol_map();
1835 }
1836 if (map == long_cons_ascii_string_map()) {
1837 return long_cons_ascii_symbol_map();
1838 }
1839
1840 if (map == short_sliced_string_map()) return short_sliced_symbol_map();
1841  if (map == medium_sliced_string_map()) return medium_sliced_symbol_map();
1842  if (map == long_sliced_string_map()) return long_sliced_symbol_map();
1843
1844 if (map == short_sliced_ascii_string_map()) {
1845 return short_sliced_ascii_symbol_map();
1846 }
1847  if (map == medium_sliced_ascii_string_map()) {
1848    return medium_sliced_ascii_symbol_map();
1849  }
1850  if (map == long_sliced_ascii_string_map()) {
1851    return long_sliced_ascii_symbol_map();
1852  }
1853
1854  if (map == short_external_string_map()) return short_external_symbol_map();
1855  if (map == medium_external_string_map()) return medium_external_symbol_map();
1856  if (map == long_external_string_map()) return long_external_symbol_map();
1857
1858  if (map == short_external_ascii_string_map()) {
1859    return short_external_ascii_symbol_map();
1860  }
1861  if (map == medium_external_ascii_string_map()) {
1862    return medium_external_ascii_symbol_map();
1863  }
1864  if (map == long_external_ascii_string_map()) {
1865    return long_external_ascii_symbol_map();
1866  }
1867
1868 // No match found.
1869 return NULL;
1870}
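
// Sketch of how this is used (illustrative only): a suitable old-space string
// can be turned into a symbol in place by swapping its map for the matching
// symbol map, avoiding a copy of the characters.
//
//   Map* symbol_map = Heap::SymbolMapForString(string);
//   if (symbol_map != NULL) string->set_map(symbol_map);
//   // A NULL result means the characters must be copied into a new symbol.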
1871
1872
1873Object* Heap::AllocateSymbol(unibrow::CharacterStream* buffer,
1874 int chars,
1875 int hash) {
1876  // Ensure that chars matches the number of characters in the buffer.
1877 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
1878 // Determine whether the string is ascii.
1879 bool is_ascii = true;
1880 while (buffer->has_more()) {
1881 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) is_ascii = false;
1882 }
1883 buffer->Rewind();
1884
1885 // Compute map and object size.
1886 int size;
1887 Map* map;
1888
1889 if (is_ascii) {
1890 if (chars <= String::kMaxShortStringSize) {
1891 map = short_ascii_symbol_map();
1892 } else if (chars <= String::kMaxMediumStringSize) {
1893 map = medium_ascii_symbol_map();
1894 } else {
1895 map = long_ascii_symbol_map();
1896 }
1897 size = AsciiString::SizeFor(chars);
1898 } else {
1899 if (chars <= String::kMaxShortStringSize) {
1900 map = short_symbol_map();
1901 } else if (chars <= String::kMaxMediumStringSize) {
1902 map = medium_symbol_map();
1903 } else {
1904 map = long_symbol_map();
1905 }
1906 size = TwoByteString::SizeFor(chars);
1907 }
1908
1909 // Allocate string.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001910 AllocationSpace space =
1911 (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001912 Object* result = AllocateRaw(size, space);
1913 if (result->IsFailure()) return result;
1914
1915 reinterpret_cast<HeapObject*>(result)->set_map(map);
1916 // The hash value contains the length of the string.
1917 String::cast(result)->set_length_field(hash);
1918
1919 ASSERT_EQ(size, String::cast(result)->Size());
1920
1921 // Fill in the characters.
1922 for (int i = 0; i < chars; i++) {
1923 String::cast(result)->Set(i, buffer->GetNext());
1924 }
1925 return result;
1926}
1927
1928
1929Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001930 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001931 int size = AsciiString::SizeFor(length);
1932 if (size > MaxHeapObjectSize()) {
1933 space = LO_SPACE;
1934 }
1935
1936 // Use AllocateRaw rather than Allocate because the object's size cannot be
1937 // determined from the map.
1938 Object* result = AllocateRaw(size, space);
1939 if (result->IsFailure()) return result;
1940
1941 // Determine the map based on the string's length.
1942 Map* map;
1943 if (length <= String::kMaxShortStringSize) {
1944 map = short_ascii_string_map();
1945 } else if (length <= String::kMaxMediumStringSize) {
1946 map = medium_ascii_string_map();
1947 } else {
1948 map = long_ascii_string_map();
1949 }
1950
1951 // Partially initialize the object.
1952 HeapObject::cast(result)->set_map(map);
1953 String::cast(result)->set_length(length);
1954 ASSERT_EQ(size, HeapObject::cast(result)->Size());
1955 return result;
1956}
1957
1958
1959Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001960 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001961 int size = TwoByteString::SizeFor(length);
1962 if (size > MaxHeapObjectSize()) {
1963 space = LO_SPACE;
1964 }
1965
1966 // Use AllocateRaw rather than Allocate because the object's size cannot be
1967 // determined from the map.
1968 Object* result = AllocateRaw(size, space);
1969 if (result->IsFailure()) return result;
1970
1971 // Determine the map based on the string's length.
1972 Map* map;
1973 if (length <= String::kMaxShortStringSize) {
1974 map = short_string_map();
1975 } else if (length <= String::kMaxMediumStringSize) {
1976 map = medium_string_map();
1977 } else {
1978 map = long_string_map();
1979 }
1980
1981 // Partially initialize the object.
1982 HeapObject::cast(result)->set_map(map);
1983 String::cast(result)->set_length(length);
1984 ASSERT_EQ(size, HeapObject::cast(result)->Size());
1985 return result;
1986}
1987
1988
1989Object* Heap::AllocateEmptyFixedArray() {
1990 int size = FixedArray::SizeFor(0);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001991 Object* result = AllocateRaw(size, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001992 if (result->IsFailure()) return result;
1993 // Initialize the object.
1994 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
1995 reinterpret_cast<Array*>(result)->set_length(0);
1996 return result;
1997}
1998
1999
2000Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
2001 ASSERT(empty_fixed_array()->IsFixedArray());
2002 if (length == 0) return empty_fixed_array();
2003
2004 int size = FixedArray::SizeFor(length);
2005 Object* result;
2006 if (size > MaxHeapObjectSize()) {
2007 result = lo_space_->AllocateRawFixedArray(size);
2008 } else {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002009 AllocationSpace space =
2010 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002011 result = AllocateRaw(size, space);
2012 }
2013 if (result->IsFailure()) return result;
2014
2015 // Initialize the object.
2016 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2017 FixedArray* array = FixedArray::cast(result);
2018 array->set_length(length);
2019 for (int index = 0; index < length; index++) array->set_undefined(index);
2020 return array;
2021}
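
// Usage sketch (illustrative only): a pretenured allocation goes to the old
// pointer space and every element starts out as undefined.
//
//   Object* arr = Heap::AllocateFixedArray(16, TENURED);
//   if (arr->IsFailure()) return arr;
//   // FixedArray::cast(arr)->get(0) == Heap::undefined_value()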
2022
2023
2024Object* Heap::AllocateFixedArrayWithHoles(int length) {
2025 if (length == 0) return empty_fixed_array();
2026 int size = FixedArray::SizeFor(length);
2027 Object* result = size > MaxHeapObjectSize()
2028 ? lo_space_->AllocateRawFixedArray(size)
2029 : AllocateRaw(size, NEW_SPACE);
2030 if (result->IsFailure()) return result;
2031
2032 // Initialize the object.
2033 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2034 FixedArray* array = FixedArray::cast(result);
2035 array->set_length(length);
2036 for (int index = 0; index < length; index++) array->set_the_hole(index);
2037 return array;
2038}
2039
2040
2041Object* Heap::AllocateHashTable(int length) {
2042 Object* result = Heap::AllocateFixedArray(length);
2043 if (result->IsFailure()) return result;
2044 reinterpret_cast<Array*>(result)->set_map(hash_table_map());
2045 ASSERT(result->IsDictionary());
2046 return result;
2047}
2048
2049
2050Object* Heap::AllocateGlobalContext() {
2051 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
2052 if (result->IsFailure()) return result;
2053 Context* context = reinterpret_cast<Context*>(result);
2054 context->set_map(global_context_map());
2055 ASSERT(context->IsGlobalContext());
2056 ASSERT(result->IsContext());
2057 return result;
2058}
2059
2060
2061Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
2062 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
2063 Object* result = Heap::AllocateFixedArray(length);
2064 if (result->IsFailure()) return result;
2065 Context* context = reinterpret_cast<Context*>(result);
2066 context->set_map(context_map());
2067 context->set_closure(function);
2068 context->set_fcontext(context);
2069 context->set_previous(NULL);
2070 context->set_extension(NULL);
2071 context->set_global(function->context()->global());
2072 ASSERT(!context->IsGlobalContext());
2073 ASSERT(context->is_function_context());
2074 ASSERT(result->IsContext());
2075 return result;
2076}
2077
2078
2079Object* Heap::AllocateWithContext(Context* previous, JSObject* extension) {
2080 Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
2081 if (result->IsFailure()) return result;
2082 Context* context = reinterpret_cast<Context*>(result);
2083 context->set_map(context_map());
2084 context->set_closure(previous->closure());
2085 context->set_fcontext(previous->fcontext());
2086 context->set_previous(previous);
2087 context->set_extension(extension);
2088 context->set_global(previous->global());
2089 ASSERT(!context->IsGlobalContext());
2090 ASSERT(!context->is_function_context());
2091 ASSERT(result->IsContext());
2092 return result;
2093}
2094
2095
2096Object* Heap::AllocateStruct(InstanceType type) {
2097 Map* map;
2098 switch (type) {
2099#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
2100STRUCT_LIST(MAKE_CASE)
2101#undef MAKE_CASE
2102 default:
2103 UNREACHABLE();
2104 return Failure::InternalError();
2105 }
2106 int size = map->instance_size();
2107 AllocationSpace space =
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002108 (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_POINTER_SPACE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002109 Object* result = Heap::Allocate(map, space);
2110 if (result->IsFailure()) return result;
2111 Struct::cast(result)->InitializeBody(size);
2112 return result;
2113}
2114
2115
2116#ifdef DEBUG
2117
2118void Heap::Print() {
2119 if (!HasBeenSetup()) return;
2120 Top::PrintStack();
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002121 AllSpaces spaces;
2122 while (Space* space = spaces.next()) space->Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002123}
2124
2125
2126void Heap::ReportCodeStatistics(const char* title) {
2127 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
2128 PagedSpace::ResetCodeStatistics();
2129 // We do not look for code in new space, map space, or old space. If code
2130 // somehow ends up in those spaces, we would miss it here.
2131 code_space_->CollectCodeStatistics();
2132 lo_space_->CollectCodeStatistics();
2133 PagedSpace::ReportCodeStatistics();
2134}
2135
2136
2137// This function expects that NewSpace's allocated objects histogram is
2138// populated (via a call to CollectStatistics or else as a side effect of a
2139// just-completed scavenge collection).
2140void Heap::ReportHeapStatistics(const char* title) {
2141 USE(title);
2142 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
2143 title, gc_count_);
2144 PrintF("mark-compact GC : %d\n", mc_count_);
2145 PrintF("promoted_space_limit_ %d\n", promoted_space_limit_);
2146
2147 PrintF("\n");
2148 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
2149 GlobalHandles::PrintStats();
2150 PrintF("\n");
2151
2152 PrintF("Heap statistics : ");
2153 MemoryAllocator::ReportStatistics();
2154 PrintF("To space : ");
2155 new_space_->ReportStatistics();
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002156 PrintF("Old pointer space : ");
2157 old_pointer_space_->ReportStatistics();
2158 PrintF("Old data space : ");
2159 old_data_space_->ReportStatistics();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002160 PrintF("Code space : ");
2161 code_space_->ReportStatistics();
2162 PrintF("Map space : ");
2163 map_space_->ReportStatistics();
2164 PrintF("Large object space : ");
2165 lo_space_->ReportStatistics();
2166 PrintF(">>>>>> ========================================= >>>>>>\n");
2167}
2168
2169#endif // DEBUG
2170
2171bool Heap::Contains(HeapObject* value) {
2172 return Contains(value->address());
2173}
2174
2175
2176bool Heap::Contains(Address addr) {
2177 if (OS::IsOutsideAllocatedSpace(addr)) return false;
2178 return HasBeenSetup() &&
2179 (new_space_->ToSpaceContains(addr) ||
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002180 old_pointer_space_->Contains(addr) ||
2181 old_data_space_->Contains(addr) ||
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002182 code_space_->Contains(addr) ||
2183 map_space_->Contains(addr) ||
2184 lo_space_->SlowContains(addr));
2185}
2186
2187
2188bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
2189 return InSpace(value->address(), space);
2190}
2191
2192
2193bool Heap::InSpace(Address addr, AllocationSpace space) {
2194 if (OS::IsOutsideAllocatedSpace(addr)) return false;
2195 if (!HasBeenSetup()) return false;
2196
2197 switch (space) {
2198 case NEW_SPACE:
2199 return new_space_->ToSpaceContains(addr);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002200 case OLD_POINTER_SPACE:
2201 return old_pointer_space_->Contains(addr);
2202 case OLD_DATA_SPACE:
2203 return old_data_space_->Contains(addr);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002204 case CODE_SPACE:
2205 return code_space_->Contains(addr);
2206 case MAP_SPACE:
2207 return map_space_->Contains(addr);
2208 case LO_SPACE:
2209 return lo_space_->SlowContains(addr);
2210 }
2211
2212 return false;
2213}
2214
2215
2216#ifdef DEBUG
2217void Heap::Verify() {
2218 ASSERT(HasBeenSetup());
2219
2220 VerifyPointersVisitor visitor;
2221 Heap::IterateRoots(&visitor);
2222
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002223 AllSpaces spaces;
2224 while (Space* space = spaces.next()) {
2225 space->Verify();
2226 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002227}
2228#endif // DEBUG
2229
2230
2231Object* Heap::LookupSymbol(Vector<const char> string) {
2232 Object* symbol = NULL;
2233 Object* new_table =
2234 SymbolTable::cast(symbol_table_)->LookupSymbol(string, &symbol);
2235 if (new_table->IsFailure()) return new_table;
2236 symbol_table_ = new_table;
2237 ASSERT(symbol != NULL);
2238 return symbol;
2239}
2240
2241
2242Object* Heap::LookupSymbol(String* string) {
2243 if (string->IsSymbol()) return string;
2244 Object* symbol = NULL;
2245 Object* new_table =
2246 SymbolTable::cast(symbol_table_)->LookupString(string, &symbol);
2247 if (new_table->IsFailure()) return new_table;
2248 symbol_table_ = new_table;
2249 ASSERT(symbol != NULL);
2250 return symbol;
2251}
2252
2253
2254#ifdef DEBUG
2255void Heap::ZapFromSpace() {
2256 ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
2257 for (Address a = new_space_->FromSpaceLow();
2258 a < new_space_->FromSpaceHigh();
2259 a += kPointerSize) {
2260 Memory::Address_at(a) = kFromSpaceZapValue;
2261 }
2262}
2263#endif // DEBUG
2264
2265
2266void Heap::IterateRSetRange(Address object_start,
2267 Address object_end,
2268 Address rset_start,
2269 ObjectSlotCallback copy_object_func) {
2270 Address object_address = object_start;
2271 Address rset_address = rset_start;
2272
2273 // Loop over all the pointers in [object_start, object_end).
2274 while (object_address < object_end) {
2275 uint32_t rset_word = Memory::uint32_at(rset_address);
2276
2277 if (rset_word != 0) {
2278 // Bits were set.
2279 uint32_t result_rset = rset_word;
2280
2281 // Loop over all the bits in the remembered set word. Though
2282 // remembered sets are sparse, faster (eg, binary) search for
2283 // set bits does not seem to help much here.
2284 for (int bit_offset = 0; bit_offset < kBitsPerInt; bit_offset++) {
2285 uint32_t bitmask = 1 << bit_offset;
2286 // Do not dereference pointers at or past object_end.
2287 if ((rset_word & bitmask) != 0 && object_address < object_end) {
2288 Object** object_p = reinterpret_cast<Object**>(object_address);
2289 if (Heap::InFromSpace(*object_p)) {
2290 copy_object_func(reinterpret_cast<HeapObject**>(object_p));
2291 }
2292 // If this pointer does not need to be remembered anymore, clear
2293 // the remembered set bit.
2294 if (!Heap::InToSpace(*object_p)) result_rset &= ~bitmask;
2295 }
2296 object_address += kPointerSize;
2297 }
2298
2299 // Update the remembered set if it has changed.
2300 if (result_rset != rset_word) {
2301 Memory::uint32_at(rset_address) = result_rset;
2302 }
2303 } else {
2304 // No bits in the word were set. This is the common case.
2305 object_address += kPointerSize * kBitsPerInt;
2306 }
2307
2308 rset_address += kIntSize;
2309 }
2310}
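
// Worked example of the mapping above (illustrative only, assuming 4-byte
// pointers): each 32-bit remembered-set word covers kBitsPerInt consecutive
// pointer slots, i.e. 32 * 4 == 128 bytes of the object area, and bit 5 of
// the word at rset_start corresponds to the slot at
// object_start + 5 * kPointerSize.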
2311
2312
2313void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
2314 ASSERT(Page::is_rset_in_use());
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002315 ASSERT(space == old_pointer_space_ || space == map_space_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002316
2317 PageIterator it(space, PageIterator::PAGES_IN_USE);
2318 while (it.has_next()) {
2319 Page* page = it.next();
2320 IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
2321 page->RSetStart(), copy_object_func);
2322 }
2323}
2324
2325
2326#ifdef DEBUG
2327#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
2328#else
2329#define SYNCHRONIZE_TAG(tag)
2330#endif
2331
2332void Heap::IterateRoots(ObjectVisitor* v) {
2333 IterateStrongRoots(v);
2334 v->VisitPointer(reinterpret_cast<Object**>(&symbol_table_));
2335 SYNCHRONIZE_TAG("symbol_table");
2336}
2337
2338
2339void Heap::IterateStrongRoots(ObjectVisitor* v) {
2340#define ROOT_ITERATE(type, name) \
2341 v->VisitPointer(reinterpret_cast<Object**>(&name##_));
2342 STRONG_ROOT_LIST(ROOT_ITERATE);
2343#undef ROOT_ITERATE
2344 SYNCHRONIZE_TAG("strong_root_list");
2345
2346#define STRUCT_MAP_ITERATE(NAME, Name, name) \
2347 v->VisitPointer(reinterpret_cast<Object**>(&name##_map_));
2348 STRUCT_LIST(STRUCT_MAP_ITERATE);
2349#undef STRUCT_MAP_ITERATE
2350 SYNCHRONIZE_TAG("struct_map");
2351
2352#define SYMBOL_ITERATE(name, string) \
2353 v->VisitPointer(reinterpret_cast<Object**>(&name##_));
2354 SYMBOL_LIST(SYMBOL_ITERATE)
2355#undef SYMBOL_ITERATE
2356 SYNCHRONIZE_TAG("symbol");
2357
2358 Bootstrapper::Iterate(v);
2359 SYNCHRONIZE_TAG("bootstrapper");
2360 Top::Iterate(v);
2361 SYNCHRONIZE_TAG("top");
2362 Debug::Iterate(v);
2363 SYNCHRONIZE_TAG("debug");
kasperl@chromium.orgb9123622008-09-17 14:05:56 +00002364 CompilationCache::Iterate(v);
2365 SYNCHRONIZE_TAG("compilationcache");
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002366
2367 // Iterate over local handles in handle scopes.
2368 HandleScopeImplementer::Iterate(v);
2369 SYNCHRONIZE_TAG("handlescope");
2370
2371 // Iterate over the builtin code objects and code stubs in the heap. Note
2372 // that it is not strictly necessary to iterate over code objects on
2373 // scavenge collections. We still do it here because this same function
2374 // is used by the mark-sweep collector and the deserializer.
2375 Builtins::IterateBuiltins(v);
2376 SYNCHRONIZE_TAG("builtins");
2377
2378 // Iterate over global handles.
2379 GlobalHandles::IterateRoots(v);
2380 SYNCHRONIZE_TAG("globalhandles");
2381
2382 // Iterate over pointers being held by inactive threads.
2383 ThreadManager::Iterate(v);
2384 SYNCHRONIZE_TAG("threadmanager");
2385}
2386#undef SYNCHRONIZE_TAG
2387
2388
2389// Flag is set when the heap has been configured. The heap can be repeatedly
2390// configured through the API until it is set up.
2391static bool heap_configured = false;
2392
2393// TODO(1236194): Since the heap size is configurable on the command line
2394// and through the API, we should gracefully handle the case that the heap
2395// size is not big enough to fit all the initial objects.
2396bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
2397 if (HasBeenSetup()) return false;
2398
2399 if (semispace_size > 0) semispace_size_ = semispace_size;
2400 if (old_gen_size > 0) old_generation_size_ = old_gen_size;
2401
2402 // The new space size must be a power of two to support single-bit testing
2403 // for containment.
mads.s.ager@gmail.com769cc962008-08-06 10:02:49 +00002404 semispace_size_ = RoundUpToPowerOf2(semispace_size_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002405 initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
2406 young_generation_size_ = 2 * semispace_size_;
2407
2408 // The old generation is paged.
2409 old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);
2410
2411 heap_configured = true;
2412 return true;
2413}
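
// Worked example (illustrative only): ConfigureHeap(768 * KB, 600 * MB)
// rounds the semispace size up to the next power of two (1 MB), making the
// young generation 2 MB, and rounds the old generation size up to a multiple
// of Page::kPageSize.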
2414
2415
kasper.lund7276f142008-07-30 08:49:36 +00002416bool Heap::ConfigureHeapDefault() {
2417 return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
2418}
2419
2420
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002421int Heap::PromotedSpaceSize() {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002422 return old_pointer_space_->Size()
2423 + old_data_space_->Size()
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002424 + code_space_->Size()
2425 + map_space_->Size()
2426 + lo_space_->Size();
2427}
2428
2429
kasper.lund7276f142008-07-30 08:49:36 +00002430int Heap::PromotedExternalMemorySize() {
2431 if (amount_of_external_allocated_memory_
2432 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
2433 return amount_of_external_allocated_memory_
2434 - amount_of_external_allocated_memory_at_last_global_gc_;
2435}
2436
2437
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002438bool Heap::Setup(bool create_heap_objects) {
2439 // Initialize heap spaces and initial maps and objects. Whenever something
2440 // goes wrong, just return false. The caller should check the results and
2441 // call Heap::TearDown() to release allocated memory.
2442 //
2443 // If the heap is not yet configured (eg, through the API), configure it.
2444 // Configuration is based on the flags new-space-size (really the semispace
2445 // size) and old-space-size if set or the initial values of semispace_size_
2446 // and old_generation_size_ otherwise.
2447 if (!heap_configured) {
kasper.lund7276f142008-07-30 08:49:36 +00002448 if (!ConfigureHeapDefault()) return false;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002449 }
2450
2451 // Setup memory allocator and allocate an initial chunk of memory. The
2452 // initial chunk is double the size of the new space to ensure that we can
2453 // find a pair of semispaces that are contiguous and aligned to their size.
2454 if (!MemoryAllocator::Setup(MaxCapacity())) return false;
2455 void* chunk
2456 = MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
2457 if (chunk == NULL) return false;
2458
2459 // Put the initial chunk of the old space at the start of the initial
2460 // chunk, then the two new space semispaces, then the initial chunk of
2461 // code space. Align the pair of semispaces to their size, which must be
2462 // a power of 2.
2463 ASSERT(IsPowerOf2(young_generation_size_));
kasperl@chromium.orgb9123622008-09-17 14:05:56 +00002464 Address code_space_start = reinterpret_cast<Address>(chunk);
2465 Address new_space_start = RoundUp(code_space_start, young_generation_size_);
2466 Address old_space_start = new_space_start + young_generation_size_;
2467 int code_space_size = new_space_start - code_space_start;
2468 int old_space_size = young_generation_size_ - code_space_size;
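  // Layout sketch of the reserved chunk (illustrative, not to scale):
  //
  //   |<- code space ->|<------- new space ------->|<-- old spaces ... -->|
  //   code_space_start  new_space_start             old_space_start
  //
  // new_space_start is code_space_start rounded up to the (power-of-two)
  // young generation size, so the pair of semispaces ends up size-aligned.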
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002469
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002470 // Initialize new space.
kasper.lund7276f142008-07-30 08:49:36 +00002471 new_space_ = new NewSpace(initial_semispace_size_,
2472 semispace_size_,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002473 NEW_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002474 if (new_space_ == NULL) return false;
2475 if (!new_space_->Setup(new_space_start, young_generation_size_)) return false;
2476
2477 // Initialize old space, set the maximum capacity to the old generation
kasper.lund7276f142008-07-30 08:49:36 +00002478 // size. It will not contain code.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002479 old_pointer_space_ =
2480 new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
2481 if (old_pointer_space_ == NULL) return false;
2482 if (!old_pointer_space_->Setup(old_space_start, old_space_size >> 1)) {
2483 return false;
2484 }
2485 old_data_space_ =
2486 new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
2487 if (old_data_space_ == NULL) return false;
2488 if (!old_data_space_->Setup(old_space_start + (old_space_size >> 1),
2489 old_space_size >> 1)) {
2490 return false;
2491 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002492
2493 // Initialize the code space, set its maximum capacity to the old
kasper.lund7276f142008-07-30 08:49:36 +00002494 // generation size. It needs executable memory.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002495 code_space_ =
2496 new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002497 if (code_space_ == NULL) return false;
2498 if (!code_space_->Setup(code_space_start, code_space_size)) return false;
2499
2500 // Initialize map space.
kasper.lund7276f142008-07-30 08:49:36 +00002501 map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002502 if (map_space_ == NULL) return false;
2503 // Setting up a paged space without giving it a virtual memory range big
2504 // enough to hold at least a page will cause it to allocate.
2505 if (!map_space_->Setup(NULL, 0)) return false;
2506
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002507  // The large object space may contain code or data. We set the memory
2508 // to be non-executable here for safety, but this means we need to enable it
2509 // explicitly when allocating large code objects.
2510 lo_space_ = new LargeObjectSpace(LO_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002511 if (lo_space_ == NULL) return false;
2512 if (!lo_space_->Setup()) return false;
2513
2514 if (create_heap_objects) {
2515 // Create initial maps.
2516 if (!CreateInitialMaps()) return false;
2517 if (!CreateApiObjects()) return false;
2518
2519 // Create initial objects
2520 if (!CreateInitialObjects()) return false;
2521 }
2522
2523 LOG(IntEvent("heap-capacity", Capacity()));
2524 LOG(IntEvent("heap-available", Available()));
2525
2526 return true;
2527}
2528
2529
2530void Heap::TearDown() {
2531 GlobalHandles::TearDown();
2532
2533 if (new_space_ != NULL) {
2534 new_space_->TearDown();
2535 delete new_space_;
2536 new_space_ = NULL;
2537 }
2538
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002539 if (old_pointer_space_ != NULL) {
2540 old_pointer_space_->TearDown();
2541 delete old_pointer_space_;
2542 old_pointer_space_ = NULL;
2543 }
2544
2545 if (old_data_space_ != NULL) {
2546 old_data_space_->TearDown();
2547 delete old_data_space_;
2548 old_data_space_ = NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002549 }
2550
2551 if (code_space_ != NULL) {
2552 code_space_->TearDown();
2553 delete code_space_;
2554 code_space_ = NULL;
2555 }
2556
2557 if (map_space_ != NULL) {
2558 map_space_->TearDown();
2559 delete map_space_;
2560 map_space_ = NULL;
2561 }
2562
2563 if (lo_space_ != NULL) {
2564 lo_space_->TearDown();
2565 delete lo_space_;
2566 lo_space_ = NULL;
2567 }
2568
2569 MemoryAllocator::TearDown();
2570}
2571
2572
2573void Heap::Shrink() {
2574 // Try to shrink map, old, and code spaces.
2575 map_space_->Shrink();
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002576 old_pointer_space_->Shrink();
2577 old_data_space_->Shrink();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002578 code_space_->Shrink();
2579}
2580
2581
2582#ifdef DEBUG
2583
2584class PrintHandleVisitor: public ObjectVisitor {
2585 public:
2586 void VisitPointers(Object** start, Object** end) {
2587 for (Object** p = start; p < end; p++)
2588 PrintF(" handle %p to %p\n", p, *p);
2589 }
2590};
2591
2592void Heap::PrintHandles() {
2593 PrintF("Handles:\n");
2594 PrintHandleVisitor v;
2595 HandleScopeImplementer::Iterate(&v);
2596}
2597
2598#endif
2599
2600
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002601Space* AllSpaces::next() {
2602 switch (counter_++) {
2603 case NEW_SPACE:
2604 return Heap::new_space();
2605 case OLD_POINTER_SPACE:
2606 return Heap::old_pointer_space();
2607 case OLD_DATA_SPACE:
2608 return Heap::old_data_space();
2609 case CODE_SPACE:
2610 return Heap::code_space();
2611 case MAP_SPACE:
2612 return Heap::map_space();
2613 case LO_SPACE:
2614 return Heap::lo_space();
2615 default:
2616 return NULL;
2617 }
2618}
2619
2620
2621PagedSpace* PagedSpaces::next() {
2622 switch (counter_++) {
2623 case OLD_POINTER_SPACE:
2624 return Heap::old_pointer_space();
2625 case OLD_DATA_SPACE:
2626 return Heap::old_data_space();
2627 case CODE_SPACE:
2628 return Heap::code_space();
2629 case MAP_SPACE:
2630 return Heap::map_space();
2631 default:
2632 return NULL;
2633 }
2634}
2635
2637
2638OldSpace* OldSpaces::next() {
2639 switch (counter_++) {
2640 case OLD_POINTER_SPACE:
2641 return Heap::old_pointer_space();
2642 case OLD_DATA_SPACE:
2643 return Heap::old_data_space();
2644 case CODE_SPACE:
2645 return Heap::code_space();
2646 default:
2647 return NULL;
2648 }
2649}
2650
2651
kasper.lund7276f142008-07-30 08:49:36 +00002652SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
2653}
2654
2655
2656SpaceIterator::~SpaceIterator() {
2657 // Delete active iterator if any.
2658 delete iterator_;
2659}
2660
2661
2662bool SpaceIterator::has_next() {
2663 // Iterate until no more spaces.
2664 return current_space_ != LAST_SPACE;
2665}
2666
2667
2668ObjectIterator* SpaceIterator::next() {
2669 if (iterator_ != NULL) {
2670 delete iterator_;
2671 iterator_ = NULL;
2672    // Move to the next space.
2673 current_space_++;
2674 if (current_space_ > LAST_SPACE) {
2675 return NULL;
2676 }
2677 }
2678
2679 // Return iterator for the new current space.
2680 return CreateIterator();
2681}
2682
2683
2684// Create an iterator for the space to iterate.
2685ObjectIterator* SpaceIterator::CreateIterator() {
2686 ASSERT(iterator_ == NULL);
2687
2688 switch (current_space_) {
2689 case NEW_SPACE:
2690 iterator_ = new SemiSpaceIterator(Heap::new_space());
2691 break;
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002692 case OLD_POINTER_SPACE:
2693 iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
2694 break;
2695 case OLD_DATA_SPACE:
2696 iterator_ = new HeapObjectIterator(Heap::old_data_space());
kasper.lund7276f142008-07-30 08:49:36 +00002697 break;
2698 case CODE_SPACE:
2699 iterator_ = new HeapObjectIterator(Heap::code_space());
2700 break;
2701 case MAP_SPACE:
2702 iterator_ = new HeapObjectIterator(Heap::map_space());
2703 break;
2704 case LO_SPACE:
2705 iterator_ = new LargeObjectIterator(Heap::lo_space());
2706 break;
2707 }
2708
2709  // Return the newly allocated iterator.
2710 ASSERT(iterator_ != NULL);
2711 return iterator_;
2712}
2713
2714
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002715HeapIterator::HeapIterator() {
2716 Init();
2717}
2718
2719
2720HeapIterator::~HeapIterator() {
2721 Shutdown();
2722}
2723
2724
2725void HeapIterator::Init() {
2726 // Start the iteration.
2727 space_iterator_ = new SpaceIterator();
2728 object_iterator_ = space_iterator_->next();
2729}
2730
2731
2732void HeapIterator::Shutdown() {
2733 // Make sure the last iterator is deallocated.
2734 delete space_iterator_;
2735 space_iterator_ = NULL;
2736 object_iterator_ = NULL;
2737}
2738
2739
2740bool HeapIterator::has_next() {
2741 // No iterator means we are done.
2742 if (object_iterator_ == NULL) return false;
2743
2744 if (object_iterator_->has_next_object()) {
2745 // If the current iterator has more objects we are fine.
2746 return true;
2747 } else {
2748    // Go through the spaces looking for one that has objects.
2749 while (space_iterator_->has_next()) {
2750 object_iterator_ = space_iterator_->next();
2751 if (object_iterator_->has_next_object()) {
2752 return true;
2753 }
2754 }
2755 }
2756 // Done with the last space.
2757 object_iterator_ = NULL;
2758 return false;
2759}
2760
2761
2762HeapObject* HeapIterator::next() {
2763 if (has_next()) {
2764 return object_iterator_->next_object();
2765 } else {
2766 return NULL;
2767 }
2768}
2769
2770
2771void HeapIterator::reset() {
2772 // Restart the iterator.
2773 Shutdown();
2774 Init();
2775}
2776
2777
2778//
2779// HeapProfiler class implementation.
2780//
2781#ifdef ENABLE_LOGGING_AND_PROFILING
2782void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
2783 InstanceType type = obj->map()->instance_type();
2784 ASSERT(0 <= type && type <= LAST_TYPE);
2785 info[type].increment_number(1);
2786 info[type].increment_bytes(obj->Size());
2787}
2788#endif
2789
2790
2791#ifdef ENABLE_LOGGING_AND_PROFILING
2792void HeapProfiler::WriteSample() {
2793 LOG(HeapSampleBeginEvent("Heap", "allocated"));
2794
2795 HistogramInfo info[LAST_TYPE+1];
2796#define DEF_TYPE_NAME(name) info[name].set_name(#name);
2797 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
2798#undef DEF_TYPE_NAME
2799
2800 HeapIterator iterator;
2801 while (iterator.has_next()) {
2802 CollectStats(iterator.next(), info);
2803 }
2804
2805 // Lump all the string types together.
2806 int string_number = 0;
2807 int string_bytes = 0;
2808#define INCREMENT_SIZE(type, size, name) \
2809 string_number += info[type].number(); \
2810 string_bytes += info[type].bytes();
2811 STRING_TYPE_LIST(INCREMENT_SIZE)
2812#undef INCREMENT_SIZE
2813 if (string_bytes > 0) {
2814 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2815 }
2816
2817 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2818 if (info[i].bytes() > 0) {
2819 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
2820 info[i].bytes()));
2821 }
2822 }
2823
2824 LOG(HeapSampleEndEvent("Heap", "allocated"));
2825}
2826
2827
2828#endif
2829
2830
2831
2832#ifdef DEBUG
2833
2834static bool search_for_any_global;
2835static Object* search_target;
2836static bool found_target;
2837static List<Object*> object_stack(20);
2838
2839
2840// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
2841static const int kMarkTag = 2;
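
// Sketch of the marking trick used below (illustrative only): a live object's
// map slot holds a pointer tagged as a HeapObject. Marking overwrites it with
// the map's raw address plus kMarkTag, whose low tag bits are otherwise
// unused, so map()->IsHeapObject() turns false and doubles as the "visited"
// bit; unmarking simply subtracts kMarkTag again.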
2842
2843static void MarkObjectRecursively(Object** p);
2844class MarkObjectVisitor : public ObjectVisitor {
2845 public:
2846 void VisitPointers(Object** start, Object** end) {
2847    // Mark all HeapObject pointers in [start, end).
2848 for (Object** p = start; p < end; p++) {
2849 if ((*p)->IsHeapObject())
2850 MarkObjectRecursively(p);
2851 }
2852 }
2853};
2854
2855static MarkObjectVisitor mark_visitor;
2856
2857static void MarkObjectRecursively(Object** p) {
2858 if (!(*p)->IsHeapObject()) return;
2859
2860 HeapObject* obj = HeapObject::cast(*p);
2861
2862 Object* map = obj->map();
2863
2864 if (!map->IsHeapObject()) return; // visited before
2865
2866 if (found_target) return; // stop if target found
2867 object_stack.Add(obj);
2868 if ((search_for_any_global && obj->IsJSGlobalObject()) ||
2869 (!search_for_any_global && (obj == search_target))) {
2870 found_target = true;
2871 return;
2872 }
2873
2874 if (obj->IsCode()) {
2875 Code::cast(obj)->ConvertICTargetsFromAddressToObject();
2876 }
2877
2878 // not visited yet
2879 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
2880
2881 Address map_addr = map_p->address();
2882
2883 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
2884
2885 MarkObjectRecursively(&map);
2886
2887 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
2888 &mark_visitor);
2889
2890 if (!found_target) // don't pop if found the target
2891 object_stack.RemoveLast();
2892}
2893
2894
2895static void UnmarkObjectRecursively(Object** p);
2896class UnmarkObjectVisitor : public ObjectVisitor {
2897 public:
2898 void VisitPointers(Object** start, Object** end) {
2899    // Unmark all HeapObject pointers in [start, end).
2900 for (Object** p = start; p < end; p++) {
2901 if ((*p)->IsHeapObject())
2902 UnmarkObjectRecursively(p);
2903 }
2904 }
2905};
2906
2907static UnmarkObjectVisitor unmark_visitor;
2908
2909static void UnmarkObjectRecursively(Object** p) {
2910 if (!(*p)->IsHeapObject()) return;
2911
2912 HeapObject* obj = HeapObject::cast(*p);
2913
2914 Object* map = obj->map();
2915
2916 if (map->IsHeapObject()) return; // unmarked already
2917
2918 Address map_addr = reinterpret_cast<Address>(map);
2919
2920 map_addr -= kMarkTag;
2921
2922 ASSERT_TAG_ALIGNED(map_addr);
2923
2924 HeapObject* map_p = HeapObject::FromAddress(map_addr);
2925
2926 obj->set_map(reinterpret_cast<Map*>(map_p));
2927
2928 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
2929
2930 obj->IterateBody(Map::cast(map_p)->instance_type(),
2931 obj->SizeFromMap(Map::cast(map_p)),
2932 &unmark_visitor);
2933
2934 if (obj->IsCode()) {
2935 Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
2936 }
2937}
2938
2939
2940static void MarkRootObjectRecursively(Object** root) {
2941 if (search_for_any_global) {
2942 ASSERT(search_target == NULL);
2943 } else {
2944 ASSERT(search_target->IsHeapObject());
2945 }
2946 found_target = false;
2947 object_stack.Clear();
2948
2949 MarkObjectRecursively(root);
2950 UnmarkObjectRecursively(root);
2951
2952 if (found_target) {
2953 PrintF("=====================================\n");
2954 PrintF("==== Path to object ====\n");
2955 PrintF("=====================================\n\n");
2956
2957 ASSERT(!object_stack.is_empty());
2958 for (int i = 0; i < object_stack.length(); i++) {
2959 if (i > 0) PrintF("\n |\n |\n V\n\n");
2960 Object* obj = object_stack[i];
2961 obj->Print();
2962 }
2963 PrintF("=====================================\n");
2964 }
2965}
2966
2967
2968// Helper class for visiting HeapObjects recursively.
2969class MarkRootVisitor: public ObjectVisitor {
2970 public:
2971 void VisitPointers(Object** start, Object** end) {
2972 // Visit all HeapObject pointers in [start, end)
2973 for (Object** p = start; p < end; p++) {
2974 if ((*p)->IsHeapObject())
2975 MarkRootObjectRecursively(p);
2976 }
2977 }
2978};
2979
2980
2981// Triggers a depth-first traversal of reachable objects from roots
2982// and finds a path to a specific heap object and prints it.
2983void Heap::TracePathToObject() {
2984 search_target = NULL;
2985 search_for_any_global = false;
2986
2987 MarkRootVisitor root_visitor;
2988 IterateRoots(&root_visitor);
2989}
2990
2991
2992// Triggers a depth-first traversal of reachable objects from roots
2993// and finds a path to any global object and prints it. Useful for
2994// determining the source for leaks of global objects.
2995void Heap::TracePathToGlobal() {
2996 search_target = NULL;
2997 search_for_any_global = true;
2998
2999 MarkRootVisitor root_visitor;
3000 IterateRoots(&root_visitor);
3001}
3002#endif
3003
3004
kasper.lund7276f142008-07-30 08:49:36 +00003005GCTracer::GCTracer()
3006 : start_time_(0.0),
3007 start_size_(0.0),
3008 gc_count_(0),
3009 full_gc_count_(0),
3010 is_compacting_(false),
3011 marked_count_(0) {
3012 // These two fields reflect the state of the previous full collection.
3013 // Set them before they are changed by the collector.
3014 previous_has_compacted_ = MarkCompactCollector::HasCompacted();
3015 previous_marked_count_ = MarkCompactCollector::previous_marked_count();
3016 if (!FLAG_trace_gc) return;
3017 start_time_ = OS::TimeCurrentMillis();
3018 start_size_ = SizeOfHeapObjects();
3019}
3020
3021
3022GCTracer::~GCTracer() {
3023 if (!FLAG_trace_gc) return;
3024 // Printf ONE line iff flag is set.
3025 PrintF("%s %.1f -> %.1f MB, %d ms.\n",
3026 CollectorString(),
3027 start_size_, SizeOfHeapObjects(),
3028 static_cast<int>(OS::TimeCurrentMillis() - start_time_));
3029}
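
// Example of the trace line printed above (illustrative only):
//
//   Scavenge 8.2 -> 4.1 MB, 3 ms.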
3030
3031
3032const char* GCTracer::CollectorString() {
3033 switch (collector_) {
3034 case SCAVENGER:
3035 return "Scavenge";
3036 case MARK_COMPACTOR:
3037 return MarkCompactCollector::HasCompacted() ? "Mark-compact"
3038 : "Mark-sweep";
3039 }
3040 return "Unknown GC";
3041}
3042
3043
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003044} } // namespace v8::internal