// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "global-handles.h"
#include "jsregexp.h"
#include "mark-compact.h"
#include "natives.h"
#include "scanner.h"
#include "scopeinfo.h"
#include "v8threads.h"

namespace v8 { namespace internal {

#ifdef DEBUG
DEFINE_bool(gc_greedy, false, "perform GC prior to some allocations");
DEFINE_bool(gc_verbose, false, "print stuff during garbage collection");
DEFINE_bool(heap_stats, false, "report heap statistics before and after GC");
DEFINE_bool(code_stats, false, "report code statistics after GC");
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC");
DEFINE_bool(print_handles, false, "report handles after GC");
DEFINE_bool(print_global_handles, false, "report global handles after GC");
DEFINE_bool(print_rset, false, "print remembered sets before GC");
#endif

DEFINE_int(new_space_size, 0, "size of (each semispace in) the new generation");
DEFINE_int(old_space_size, 0, "size of the old generation");

DEFINE_bool(gc_global, false, "always perform global GCs");
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations");
DEFINE_bool(trace_gc, false,
            "print one trace line following each garbage collection");


#ifdef ENABLE_LOGGING_AND_PROFILING
DECLARE_bool(log_gc);
#endif


#define ROOT_ALLOCATION(type, name) type* Heap::name##_;
  ROOT_LIST(ROOT_ALLOCATION)
#undef ROOT_ALLOCATION


#define STRUCT_ALLOCATION(NAME, Name, name) Map* Heap::name##_map_;
  STRUCT_LIST(STRUCT_ALLOCATION)
#undef STRUCT_ALLOCATION


#define SYMBOL_ALLOCATION(name, string) String* Heap::name##_;
  SYMBOL_LIST(SYMBOL_ALLOCATION)
#undef SYMBOL_ALLOCATION


NewSpace* Heap::new_space_ = NULL;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

int Heap::promoted_space_limit_ = 0;
int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
int Heap::semispace_size_ = 1*MB;
int Heap::old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 256*KB;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.

// Double the new space after this many scavenge collections.
int Heap::new_space_growth_limit_ = 8;
int Heap::scavenge_count_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG


int Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_->Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity();
}


int Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_->Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available();
}


bool Heap::HasBeenSetup() {
  return new_space_ != NULL &&
      old_pointer_space_ != NULL &&
      old_data_space_ != NULL &&
      code_space_ != NULL &&
      map_space_ != NULL &&
      lo_space_ != NULL;
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (PromotedSpaceSize() + PromotedExternalMemorySize()
      > promoted_space_limit_) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator::MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe: we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_->Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_->CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_->ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_->CollectStatistics();
    new_space_->ReportStatistics();
    new_space_->ClearHistograms();
  }
#endif
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // As in ReportStatisticsBeforeGC, we use some complicated logic to ensure
  // that NewSpace statistics are logged exactly once when --log-gc is
  // turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_->ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  RegExpImpl::NewSpaceCollectionPrologue();
  gc_count_++;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();

  if (FLAG_print_rset) {
    // Not all spaces have remembered set bits that we care about.
    old_pointer_space_->PrintRSet();
    map_space_->PrintRSet();
    lo_space_->PrintRSet();
  }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}


int Heap::SizeOfObjects() {
  int total = 0;
  AllSpaces spaces;
  while (Space* space = spaces.next()) total += space->Size();
  return total;
}


void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(SizeOfObjects());

  SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table_);
  Counters::symbol_table_capacity.Set(symbol_table->Capacity());
  Counters::number_of_symbols.Set(symbol_table->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
}


void Heap::CollectAllGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  CollectGarbage(0, OLD_POINTER_SPACE);
}


bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    GarbageCollector collector = SelectGarbageCollector(space);
    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    StatsRate* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    PerformGarbageCollection(space, collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  switch (space) {
    case NEW_SPACE:
      return new_space_->Available() >= requested_size;
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Available() >= requested_size;
    case OLD_DATA_SPACE:
      return old_data_space_->Available() >= requested_size;
    case CODE_SPACE:
      return code_space_->Available() >= requested_size;
    case MAP_SPACE:
      return map_space_->Available() >= requested_size;
    case LO_SPACE:
      return lo_space_->Available() >= requested_size;
  }
  return false;
}


void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
}


void Heap::PerformGarbageCollection(AllocationSpace space,
                                    GarbageCollector collector,
                                    GCTracer* tracer) {
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    global_gc_prologue_callback_();
  }

  if (collector == MARK_COMPACTOR) {
    MarkCompact(tracer);

    int promoted_space_size = PromotedSpaceSize();
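    // Raise the promotion limit to roughly 135% of the promoted size that
    // survived this collection, but by at least 2 MB.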
    promoted_space_limit_ =
        promoted_space_size + Max(2 * MB, (promoted_space_size/100) * 35);
    old_gen_exhausted_ = false;

    // If we have used the mark-compact collector to collect the new
    // space, and it has not compacted the new space, we force a
    // separate scavenge collection. This is a hack. It covers the
    // case where (1) a new space collection was requested, (2) the
    // collector selection policy selected the mark-compact collector,
    // and (3) the mark-compact collector policy selected not to
    // compact the new space. In that case, there is no more (usable)
    // free space in the new space after the collection compared to
    // before.
    if (space == NEW_SPACE && !MarkCompactCollector::HasCompacted()) {
      Scavenge();
    }
  } else {
    Scavenge();
  }
  Counters::objs_since_last_young.Set(0);

  // Process weak handles post gc.
  GlobalHandles::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    global_gc_epilogue_callback_();
  }
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  mc_count_++;
  tracer->set_full_gc_count(mc_count_);
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactPrologue();

  MarkCompactCollector::CollectGarbage(tracer);

  MarkCompactEpilogue();

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);
}


void Heap::MarkCompactPrologue() {
  // Empty eval caches
  Heap::eval_cache_global_ = Heap::null_value();
  Heap::eval_cache_non_global_ = Heap::null_value();

  RegExpImpl::OldSpaceCollectionPrologue();
  Top::MarkCompactPrologue();
  ThreadManager::MarkCompactPrologue();
}


void Heap::MarkCompactEpilogue() {
  Top::MarkCompactEpilogue();
  ThreadManager::MarkCompactEpilogue();
}


Object* Heap::FindCodeObject(Address a) {
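  // Code objects are allocated only in the code space or, when too large,
  // in the large object space (see CreateCode), so those are the only two
  // spaces that need to be searched.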
  Object* obj = code_space_->FindObject(a);
  if (obj->IsFailure()) {
    obj = lo_space_->FindObject(a);
  }
  ASSERT(!obj->IsFailure());
  return obj;
}


// Helper class for copying HeapObjects
class CopyVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) {
    CopyObject(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) CopyObject(p);
  }

 private:
  void CopyObject(Object** p) {
    if (!Heap::InFromSpace(*p)) return;
    Heap::CopyObject(reinterpret_cast<HeapObject**>(p));
  }
};


// Shared state read by the scavenge collector and set by CopyObject.
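// It is the top of a downward-growing stack of promoted-object addresses
// kept in the high end of to space (see Heap::Scavenge).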
static Address promoted_top = NULL;


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};
#endif

void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) {
    VerifyNonPointerSpacePointersVisitor v;
    HeapObjectIterator it(code_space_);
    while (it.has_next()) {
      HeapObject* object = it.next();
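      // IC targets in a code object are stored as raw addresses; convert
      // them to object pointers so the visitor can inspect them, then
      // convert back afterwards.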
      if (object->IsCode()) {
        Code::cast(object)->ConvertICTargetsFromAddressToObject();
      }
      object->Iterate(&v);
      if (object->IsCode()) {
        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
      }
    }
  }
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm.
  LOG(ResourceEvent("scavenge", "begin"));

  scavenge_count_++;
  if (new_space_->Capacity() < new_space_->MaximumCapacity() &&
      scavenge_count_ > new_space_growth_limit_) {
    // Double the size of the new space, and double the limit. The next
    // doubling attempt will occur after the current new_space_growth_limit_
    // more collections.
    // TODO(1240712): NewSpace::Double has a return value which is
    // ignored here.
    new_space_->Double();
    new_space_growth_limit_ *= 2;
  }

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_->Flip();
  new_space_->ResetAllocationInfo();

  // We need to sweep newly copied objects which can be in either the to space
  // or the old space. For to space objects, we use a mark. Newly copied
  // objects lie between the mark and the allocation top. For objects
  // promoted to old space, we write their addresses downward from the top of
  // the new space. Sweeping newly promoted objects requires an allocation
  // pointer and a mark. Note that the allocation pointer 'top' actually
  // moves downward from the high address in the to space.
  //
  // There is guaranteed to be enough room at the top of the to space for the
  // addresses of promoted objects: every object promoted frees up its size in
  // bytes from the top of the new space, and objects are at least one pointer
  // in size. Using the new space to record promoted addresses makes the
  // scavenge collector agnostic to the allocation strategy (e.g., linear or
  // free-list) used in old space.
  Address new_mark = new_space_->ToSpaceLow();
  Address promoted_mark = new_space_->ToSpaceHigh();
  promoted_top = new_space_->ToSpaceHigh();

  CopyVisitor copy_visitor;
  // Copy roots.
  IterateRoots(&copy_visitor);

  // Copy objects reachable from the old generation. By definition, there
  // are no intergenerational pointers in code or data spaces.
  IterateRSet(old_pointer_space_, &CopyObject);
  IterateRSet(map_space_, &CopyObject);
  lo_space_->IterateRSet(&CopyObject);

  bool has_processed_weak_pointers = false;

  while (true) {
    ASSERT(new_mark <= new_space_->top());
    ASSERT(promoted_mark >= promoted_top);

    // Copy objects reachable from newly copied objects.
    while (new_mark < new_space_->top() || promoted_mark > promoted_top) {
      // Sweep newly copied objects in the to space. The allocation pointer
      // can change during sweeping.
      Address previous_top = new_space_->top();
      SemiSpaceIterator new_it(new_space_, new_mark);
      while (new_it.has_next()) {
        new_it.next()->Iterate(&copy_visitor);
      }
      new_mark = previous_top;

      // Sweep newly copied objects in the old space. The promotion 'top'
      // pointer could change during sweeping.
      previous_top = promoted_top;
      for (Address current = promoted_mark - kPointerSize;
           current >= previous_top;
           current -= kPointerSize) {
        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
        object->Iterate(&copy_visitor);
        UpdateRSet(object);
      }
      promoted_mark = previous_top;
    }

    if (has_processed_weak_pointers) break;  // We are done.
    // Copy objects reachable from weak pointers.
    GlobalHandles::IterateWeakRoots(&copy_visitor);
    has_processed_weak_pointers = true;
  }

  // Set age mark.
  new_space_->set_age_mark(new_mark);

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


void Heap::ClearRSetRange(Address start, int size_in_bytes) {
  uint32_t start_bit;
  Address start_word_address =
      Page::ComputeRSetBitPosition(start, 0, &start_bit);
  uint32_t end_bit;
  Address end_word_address =
      Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
                                   0,
                                   &end_bit);

  // We want to clear the bits in the starting word starting with the
  // first bit, and in the ending word up to and including the last
  // bit. Build a pair of bitmasks to do that.
  uint32_t start_bitmask = start_bit - 1;
  uint32_t end_bitmask = ~((end_bit << 1) - 1);
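  // For example (assuming start_bit and end_bit are single-bit masks within
  // the same word): with start_bit == 1 << 3 and end_bit == 1 << 5,
  // start_bitmask == 0x00000007 and end_bitmask == 0xFFFFFFC0, so ANDing
  // with (start_bitmask | end_bitmask) clears exactly bits 3 through 5.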

  // If the start address and end address are the same, we mask that
  // word once, otherwise mask the starting and ending word
  // separately and all the ones in between.
  if (start_word_address == end_word_address) {
    Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
  } else {
    Memory::uint32_at(start_word_address) &= start_bitmask;
    Memory::uint32_at(end_word_address) &= end_bitmask;
    start_word_address += kIntSize;
    memset(start_word_address, 0, end_word_address - start_word_address);
  }
}


class UpdateRSetVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) {
    UpdateRSet(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update a store into slots [start, end), used (a) to update remembered
    // set when promoting a young object to old space or (b) to rebuild
    // remembered sets after a mark-compact collection.
    for (Object** p = start; p < end; p++) UpdateRSet(p);
  }
 private:

  void UpdateRSet(Object** p) {
    // The remembered set should not be set. It should be clear for objects
    // newly copied to old space, and it is cleared before rebuilding in the
    // mark-compact collector.
    ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
    if (Heap::InNewSpace(*p)) {
      Page::SetRSet(reinterpret_cast<Address>(p), 0);
    }
  }
};


int Heap::UpdateRSet(HeapObject* obj) {
  ASSERT(!InNewSpace(obj));
  // Special handling of fixed arrays to iterate the body based on the start
  // address and offset. Just iterating the pointers as in UpdateRSetVisitor
  // will not work because Page::SetRSet needs to have the start of the
  // object.
  if (obj->IsFixedArray()) {
    FixedArray* array = FixedArray::cast(obj);
    int length = array->length();
    for (int i = 0; i < length; i++) {
      int offset = FixedArray::kHeaderSize + i * kPointerSize;
      ASSERT(!Page::IsRSetSet(obj->address(), offset));
      if (Heap::InNewSpace(array->get(i))) {
        Page::SetRSet(obj->address(), offset);
      }
    }
  } else if (!obj->IsCode()) {
    // Skip code object, we know it does not contain inter-generational
    // pointers.
    UpdateRSetVisitor v;
    obj->Iterate(&v);
  }
  return obj->Size();
}


void Heap::RebuildRSets() {
  // By definition, we do not care about remembered set bits in code or data
  // spaces.
  map_space_->ClearRSet();
  RebuildRSets(map_space_);

  old_pointer_space_->ClearRSet();
  RebuildRSets(old_pointer_space_);

  Heap::lo_space_->ClearRSet();
  RebuildRSets(lo_space_);
}


void Heap::RebuildRSets(PagedSpace* space) {
  HeapObjectIterator it(space);
  while (it.has_next()) Heap::UpdateRSet(it.next());
}


void Heap::RebuildRSets(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  while (it.has_next()) Heap::UpdateRSet(it.next());
}


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::RecordCopiedObject(HeapObject* obj) {
  bool should_record = false;
#ifdef DEBUG
  should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  should_record = should_record || FLAG_log_gc;
#endif
  if (should_record) {
    if (new_space_->Contains(obj)) {
      new_space_->RecordAllocation(obj);
    } else {
      new_space_->RecordPromotion(obj);
    }
  }
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


HeapObject* Heap::MigrateObject(HeapObject** source_p,
                                HeapObject* target,
                                int size) {
  void** src = reinterpret_cast<void**>((*source_p)->address());
  void** dst = reinterpret_cast<void**>(target->address());
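  // Copy the object word by word, including the map word. The do-while
  // below executes size/kPointerSize iterations: counter starts at one less
  // than the word count and is tested after the post-decrement.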
  int counter = size/kPointerSize - 1;
  do {
    *dst++ = *src++;
  } while (counter-- > 0);

  // Set the forwarding address.
  (*source_p)->set_map_word(MapWord::FromForwardingAddress(target));

  // Update NewSpace stats if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  RecordCopiedObject(target);
#endif

  return target;
}


void Heap::CopyObject(HeapObject** p) {
  ASSERT(InFromSpace(*p));

  HeapObject* object = *p;

  // We use the first word (where the map pointer usually is) of a heap
  // object to record the forwarding pointer. A forwarding pointer can
  // point to an old space, the code space, or the to space of the new
  // generation.
  MapWord first_word = object->map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    *p = first_word.ToForwardingAddress();
    return;
  }

  // Optimization: Bypass ConsString objects where the right-hand side is
  // Heap::empty_string(). We do not use object->IsConsString because we
  // already know that object has the heap object tag.
  InstanceType type = first_word.ToMap()->instance_type();
  if (type < FIRST_NONSTRING_TYPE &&
      String::cast(object)->representation_tag() == kConsStringTag &&
      ConsString::cast(object)->second() == Heap::empty_string()) {
    object = HeapObject::cast(ConsString::cast(object)->first());
    *p = object;
    // After patching *p we have to repeat the checks that object is in the
    // active semispace of the young generation and not already copied.
    if (!InFromSpace(object)) return;
    first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      *p = first_word.ToForwardingAddress();
      return;
    }
    type = first_word.ToMap()->instance_type();
  }

  int object_size = object->SizeFromMap(first_word.ToMap());
  Object* result;
  // If the object should be promoted, we try to copy it to old space.
  if (ShouldBePromoted(object->address(), object_size)) {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space_ ||
           target_space == Heap::old_data_space_);
    result = target_space->AllocateRaw(object_size);

    if (!result->IsFailure()) {
      *p = MigrateObject(p, HeapObject::cast(result), object_size);
      if (target_space == Heap::old_pointer_space_) {
        // Record the object's address at the top of the to space, to allow
        // it to be swept by the scavenger.
        promoted_top -= kPointerSize;
        Memory::Object_at(promoted_top) = *p;
      } else {
#ifdef DEBUG
        // Objects promoted to the data space should not have pointers to
        // new space.
        VerifyNonPointerSpacePointersVisitor v;
        (*p)->Iterate(&v);
#endif
      }
      return;
    }
  }

  // The object should remain in new space or the old space allocation failed.
  result = new_space_->AllocateRaw(object_size);
  // Failed allocation at this point is utterly unexpected.
  ASSERT(!result->IsFailure());
  *p = MigrateObject(p, HeapObject::cast(result), object_size);
}


Object* Heap::AllocatePartialMap(InstanceType instance_type,
                                 int instance_size) {
  Object* result = AllocateRawMap(Map::kSize);
  if (result->IsFailure()) return result;

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  return result;
}


Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
  Object* result = AllocateRawMap(Map::kSize);
  if (result->IsFailure()) return result;

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map(meta_map());
  map->set_instance_type(instance_type);
  map->set_prototype(null_value());
  map->set_constructor(null_value());
  map->set_instance_size(instance_size);
  map->set_instance_descriptors(empty_descriptor_array());
  map->set_code_cache(empty_fixed_array());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  return map;
}


bool Heap::CreateInitialMaps() {
  Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
  if (obj->IsFailure()) return false;

  // Map::cast cannot be used due to uninitialized map field.
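  // The meta map is its own map; setting it below closes the bootstrapping
  // cycle for the map hierarchy.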
  meta_map_ = reinterpret_cast<Map*>(obj);
  meta_map()->set_map(meta_map());

  obj = AllocatePartialMap(FIXED_ARRAY_TYPE, Array::kHeaderSize);
  if (obj->IsFailure()) return false;
  fixed_array_map_ = Map::cast(obj);

  obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
  if (obj->IsFailure()) return false;
  oddball_map_ = Map::cast(obj);

  // Allocate the empty array
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  empty_fixed_array_ = FixedArray::cast(obj);

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  null_value_ = obj;

  // Allocate the empty descriptor array. AllocateMap can now be used.
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  // There is a check against empty_descriptor_array() in cast().
  empty_descriptor_array_ = reinterpret_cast<DescriptorArray*>(obj);

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_instance_descriptors(empty_descriptor_array());
  meta_map()->set_code_cache(empty_fixed_array());

  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
  fixed_array_map()->set_code_cache(empty_fixed_array());

  oddball_map()->set_instance_descriptors(empty_descriptor_array());
  oddball_map()->set_code_cache(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());
  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
  if (obj->IsFailure()) return false;
  heap_number_map_ = Map::cast(obj);

  obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
  if (obj->IsFailure()) return false;
  proxy_map_ = Map::cast(obj);

#define ALLOCATE_STRING_MAP(type, size, name) \
  obj = AllocateMap(type, size); \
  if (obj->IsFailure()) return false; \
  name##_map_ = Map::cast(obj);
  STRING_TYPE_LIST(ALLOCATE_STRING_MAP);
#undef ALLOCATE_STRING_MAP

  obj = AllocateMap(SHORT_STRING_TYPE, TwoByteString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_short_string_map_ = Map::cast(obj);
  undetectable_short_string_map_->set_is_undetectable();

  obj = AllocateMap(MEDIUM_STRING_TYPE, TwoByteString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_medium_string_map_ = Map::cast(obj);
  undetectable_medium_string_map_->set_is_undetectable();

  obj = AllocateMap(LONG_STRING_TYPE, TwoByteString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_long_string_map_ = Map::cast(obj);
  undetectable_long_string_map_->set_is_undetectable();

  obj = AllocateMap(SHORT_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_short_ascii_string_map_ = Map::cast(obj);
  undetectable_short_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_medium_ascii_string_map_ = Map::cast(obj);
  undetectable_medium_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(LONG_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_long_ascii_string_map_ = Map::cast(obj);
  undetectable_long_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kHeaderSize);
  if (obj->IsFailure()) return false;
  byte_array_map_ = Map::cast(obj);

  obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
  if (obj->IsFailure()) return false;
  code_map_ = Map::cast(obj);

  obj = AllocateMap(FILLER_TYPE, kPointerSize);
  if (obj->IsFailure()) return false;
  one_word_filler_map_ = Map::cast(obj);

  obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
  if (obj->IsFailure()) return false;
  two_word_filler_map_ = Map::cast(obj);

#define ALLOCATE_STRUCT_MAP(NAME, Name, name) \
  obj = AllocateMap(NAME##_TYPE, Name::kSize); \
  if (obj->IsFailure()) return false; \
  name##_map_ = Map::cast(obj);
  STRUCT_LIST(ALLOCATE_STRUCT_MAP)
#undef ALLOCATE_STRUCT_MAP

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kSize);
  if (obj->IsFailure()) return false;
  hash_table_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kSize);
  if (obj->IsFailure()) return false;
  context_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kSize);
  if (obj->IsFailure()) return false;
  global_context_map_ = Map::cast(obj);

  obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
  if (obj->IsFailure()) return false;
  boilerplate_function_map_ = Map::cast(obj);

  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
  if (obj->IsFailure()) return false;
  shared_function_info_map_ = Map::cast(obj);

  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
  return true;
}


Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result = AllocateRaw(HeapNumber::kSize, space);
  if (result->IsFailure()) return result;

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


Object* Heap::AllocateHeapNumber(double value) {
  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result = new_space_->AllocateRaw(HeapNumber::kSize);
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


Object* Heap::CreateOddball(Map* map,
                            const char* to_string,
                            Object* to_number) {
  Object* result = Allocate(map, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;
  return Oddball::cast(result)->Initialize(to_string, to_number);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  neander_map_ = Map::cast(obj);

  obj = Heap::AllocateJSObjectFromMap(neander_map_);
  if (obj->IsFailure()) return false;
  Object* elements = AllocateFixedArray(2);
  if (elements->IsFailure()) return false;
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  message_listeners_ = JSObject::cast(obj);

  obj = Heap::AllocateJSObjectFromMap(neander_map_);
  if (obj->IsFailure()) return false;
  elements = AllocateFixedArray(2);
  if (elements->IsFailure()) return false;
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  debug_event_listeners_ = JSObject::cast(obj);

  return true;
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope;
  {
    CEntryStub stub;
    c_entry_code_ = *stub.GetCode();
  }
  {
    CEntryDebugBreakStub stub;
    c_entry_debug_break_code_ = *stub.GetCode();
  }
  {
    JSEntryStub stub;
    js_entry_code_ = *stub.GetCode();
  }
  {
    JSConstructEntryStub stub;
    js_construct_entry_code_ = *stub.GetCode();
  }
}


bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  obj = AllocateHeapNumber(-0.0, TENURED);
  if (obj->IsFailure()) return false;
  minus_zero_value_ = obj;
  ASSERT(signbit(minus_zero_value_->Number()) != 0);

  obj = AllocateHeapNumber(OS::nan_value(), TENURED);
  if (obj->IsFailure()) return false;
  nan_value_ = obj;

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  undefined_value_ = obj;
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate initial symbol table.
  obj = SymbolTable::Allocate(kInitialSymbolTableSize);
  if (obj->IsFailure()) return false;
  symbol_table_ = obj;

  // Assign the print strings for oddballs after creating the symbol table.
  Object* symbol = LookupAsciiSymbol("undefined");
  if (symbol->IsFailure()) return false;
  Oddball::cast(undefined_value_)->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value_)->set_to_number(nan_value_);

  // Assign the print strings for oddballs after creating the symbol table.
  symbol = LookupAsciiSymbol("null");
  if (symbol->IsFailure()) return false;
  Oddball::cast(null_value_)->set_to_string(String::cast(symbol));
  Oddball::cast(null_value_)->set_to_number(Smi::FromInt(0));

  // Initialize the null value, which was allocated in CreateInitialMaps.
  obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
  if (obj->IsFailure()) return false;

  obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
  if (obj->IsFailure()) return false;
  true_value_ = obj;

  obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
  if (obj->IsFailure()) return false;
  false_value_ = obj;

  obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
  if (obj->IsFailure()) return false;
  the_hole_value_ = obj;

  // Allocate the empty string.
  obj = AllocateRawAsciiString(0, TENURED);
  if (obj->IsFailure()) return false;
  empty_string_ = String::cast(obj);

#define SYMBOL_INITIALIZE(name, string) \
  obj = LookupAsciiSymbol(string); \
  if (obj->IsFailure()) return false; \
  (name##_) = String::cast(obj);
  SYMBOL_LIST(SYMBOL_INITIALIZE)
#undef SYMBOL_INITIALIZE

  // Allocate the proxy for __proto__.
  obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
  if (obj->IsFailure()) return false;
  prototype_accessors_ = Proxy::cast(obj);

  // Allocate the code_stubs dictionary.
  obj = Dictionary::Allocate(4);
  if (obj->IsFailure()) return false;
  code_stubs_ = Dictionary::cast(obj);

  // Allocate the non_monomorphic_cache used in stub-cache.cc
  obj = Dictionary::Allocate(4);
  if (obj->IsFailure()) return false;
  non_monomorphic_cache_ = Dictionary::cast(obj);

  CreateFixedStubs();

  // Allocate the number->string conversion cache
  obj = AllocateFixedArray(kNumberStringCacheSize * 2);
  if (obj->IsFailure()) return false;
  number_string_cache_ = FixedArray::cast(obj);

  // Allocate cache for single character strings.
  obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
  if (obj->IsFailure()) return false;
  single_character_string_cache_ = FixedArray::cast(obj);

  // Allocate cache for external strings pointing to native source code.
  obj = AllocateFixedArray(Natives::GetBuiltinsCount());
  if (obj->IsFailure()) return false;
  natives_source_cache_ = FixedArray::cast(obj);

  // Initialize the eval caches to the null value.
  eval_cache_global_ = null_value();
  eval_cache_non_global_ = null_value();

  return true;
}


static inline int double_get_hash(double d) {
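  // Fold the 64-bit bit pattern of the double into 32 bits with xor, then
  // mask to the cache size. (The mask computes hash % size only because
  // kNumberStringCacheSize is expected to be a power of two.)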
  DoubleRepresentation rep(d);
  return ((static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) &
          (Heap::kNumberStringCacheSize - 1));
}


static inline int smi_get_hash(Smi* smi) {
  return (smi->value() & (Heap::kNumberStringCacheSize - 1));
}


Object* Heap::GetNumberStringCache(Object* number) {
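  // The cache is a flat FixedArray of (number, string) pairs: entry i
  // occupies elements 2 * i (the key) and 2 * i + 1 (the value).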
  int hash;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number));
  } else {
    hash = double_get_hash(number->Number());
  }
  Object* key = number_string_cache_->get(hash * 2);
  if (key == number) {
    return String::cast(number_string_cache_->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache_->get(hash * 2 + 1));
  }
  return undefined_value();
}


void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number));
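    // A Smi is not a heap pointer, so the store can safely skip the write
    // barrier.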
    number_string_cache_->set(hash * 2, number, FixedArray::SKIP_WRITE_BARRIER);
  } else {
    hash = double_get_hash(number->Number());
    number_string_cache_->set(hash * 2, number);
  }
  number_string_cache_->set(hash * 2 + 1, string);
}


Object* Heap::SmiOrNumberFromDouble(double value,
                                    bool new_object,
                                    PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int. Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
  static const DoubleRepresentation plus_zero(0.0);
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation nan(OS::nan_value());
  ASSERT(minus_zero_value_ != NULL);
  ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));

  DoubleRepresentation rep(value);
  if (rep.bits == plus_zero.bits) return Smi::FromInt(0);  // not uncommon
  if (rep.bits == minus_zero.bits) {
    return new_object ? AllocateHeapNumber(-0.0, pretenure)
                      : minus_zero_value_;
  }
  if (rep.bits == nan.bits) {
    return new_object
        ? AllocateHeapNumber(OS::nan_value(), pretenure)
        : nan_value_;
  }

  // Try to represent the value as a tagged small integer.
  int int_value = FastD2I(value);
  if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}


Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
  return SmiOrNumberFromDouble(value,
                               true /* number object must be new */,
                               pretenure);
}


Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
  return SmiOrNumberFromDouble(value,
                               false /* use preallocated NaN, -0.0 */,
                               pretenure);
}


Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate proxies in paged spaces.
  STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result = Allocate(proxy_map(), space);
  if (result->IsFailure()) return result;

  Proxy::cast(result)->set_proxy(proxy);
  return result;
}


Object* Heap::AllocateSharedFunctionInfo(Object* name) {
  Object* result = Allocate(shared_function_info_map(), NEW_SPACE);
  if (result->IsFailure()) return result;

  SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
  share->set_name(name);
  Code* illegal = Builtins::builtin(Builtins::Illegal);
  share->set_code(illegal);
  share->set_expected_nof_properties(0);
  share->set_length(0);
  share->set_formal_parameter_count(0);
  share->set_instance_class_name(Object_symbol());
  share->set_function_data(undefined_value());
  share->set_lazy_load_data(undefined_value());
  share->set_script(undefined_value());
  share->set_start_position_and_type(0);
  share->set_debug_info(undefined_value());
  return result;
}


Object* Heap::AllocateConsString(String* first, String* second) {
  int length = first->length() + second->length();
  bool is_ascii = first->is_ascii() && second->is_ascii();

  // If the resulting string is small make a flat string.
  if (length < ConsString::kMinLength) {
    Object* result = is_ascii
        ? AllocateRawAsciiString(length)
        : AllocateRawTwoByteString(length);
    if (result->IsFailure()) return result;
    // Copy the characters into the new object.
    String* string_result = String::cast(result);
    int first_length = first->length();
    // Copy the content of the first string.
    for (int i = 0; i < first_length; i++) {
      string_result->Set(i, first->Get(i));
    }
    int second_length = second->length();
    // Copy the content of the second string.
1366 for (int i = 0; i < second_length; i++) {
1367 string_result->Set(first_length + i, second->Get(i));
1368 }
1369 return result;
1370 }
1371
1372 Map* map;
1373 if (length <= String::kMaxShortStringSize) {
1374 map = is_ascii ? short_cons_ascii_string_map()
1375 : short_cons_string_map();
1376 } else if (length <= String::kMaxMediumStringSize) {
1377 map = is_ascii ? medium_cons_ascii_string_map()
1378 : medium_cons_string_map();
1379 } else {
1380 map = is_ascii ? long_cons_ascii_string_map()
1381 : long_cons_string_map();
1382 }
1383
1384 Object* result = Allocate(map, NEW_SPACE);
1385 if (result->IsFailure()) return result;
1386
1387 ConsString* cons_string = ConsString::cast(result);
1388 cons_string->set_first(first);
1389 cons_string->set_second(second);
1390 cons_string->set_length(length);
1391
1392 return result;
1393}
1394
1395
1396Object* Heap::AllocateSlicedString(String* buffer, int start, int end) {
1397 int length = end - start;
1398
1399 // If the resulting string is small make a sub string.
1400 if (end - start <= SlicedString::kMinLength) {
1401 return Heap::AllocateSubString(buffer, start, end);
1402 }
1403
1404 Map* map;
1405 if (length <= String::kMaxShortStringSize) {
1406 map = buffer->is_ascii() ? short_sliced_ascii_string_map()
1407 : short_sliced_string_map();
1408 } else if (length <= String::kMaxMediumStringSize) {
1409 map = buffer->is_ascii() ? medium_sliced_ascii_string_map()
1410 : medium_sliced_string_map();
1411 } else {
1412 map = buffer->is_ascii() ? long_sliced_ascii_string_map()
1413 : long_sliced_string_map();
1414 }
1415
1416 Object* result = Allocate(map, NEW_SPACE);
1417 if (result->IsFailure()) return result;
1418
1419 SlicedString* sliced_string = SlicedString::cast(result);
1420 sliced_string->set_buffer(buffer);
1421 sliced_string->set_start(start);
1422 sliced_string->set_length(length);
1423
1424 return result;
1425}
1426
1427
1428Object* Heap::AllocateSubString(String* buffer, int start, int end) {
1429 int length = end - start;
1430
1431 // Make an attempt to flatten the buffer to reduce access time.
1432 buffer->TryFlatten();
1433
1434 Object* result = buffer->is_ascii()
1435 ? AllocateRawAsciiString(length)
1436 : AllocateRawTwoByteString(length);
1437 if (result->IsFailure()) return result;
1438
1439 // Copy the characters into the new object.
1440 String* string_result = String::cast(result);
1441 for (int i = 0; i < length; i++) {
1442 string_result->Set(i, buffer->Get(start + i));
1443 }
1444 return result;
1445}
1446
1447
1448Object* Heap::AllocateExternalStringFromAscii(
1449 ExternalAsciiString::Resource* resource) {
1450 Map* map;
1451 int length = resource->length();
1452 if (length <= String::kMaxShortStringSize) {
1453 map = short_external_ascii_string_map();
1454 } else if (length <= String::kMaxMediumStringSize) {
1455 map = medium_external_ascii_string_map();
1456 } else {
1457 map = long_external_ascii_string_map();
1458 }
1459
1460 Object* result = Allocate(map, NEW_SPACE);
1461 if (result->IsFailure()) return result;
1462
1463 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
1464 external_string->set_length(length);
1465 external_string->set_resource(resource);
1466
1467 return result;
1468}
1469
1470
Object* Heap::AllocateExternalStringFromTwoByte(
    ExternalTwoByteString::Resource* resource) {
  Map* map;
  int length = resource->length();
  if (length <= String::kMaxShortStringSize) {
    map = short_external_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = medium_external_string_map();
  } else {
    map = long_external_string_map();
  }

  Object* result = Allocate(map, NEW_SPACE);
  if (result->IsFailure()) return result;

  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
  external_string->set_length(length);
  external_string->set_resource(resource);

  return result;
}

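// Returns a one-character string for the given character code. Strings for
// ASCII codes are created once and then shared via
// single_character_string_cache; other codes get a fresh two-byte string.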
Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
  if (code <= String::kMaxAsciiCharCode) {
    Object* value = Heap::single_character_string_cache()->get(code);
    if (value != Heap::undefined_value()) return value;
    Object* result = Heap::AllocateRawAsciiString(1);
    if (result->IsFailure()) return result;
    String::cast(result)->Set(0, code);
    Heap::single_character_string_cache()->set(code, result);
    return result;
  }
  Object* result = Heap::AllocateRawTwoByteString(1);
  if (result->IsFailure()) return result;
  String::cast(result)->Set(0, code);
  return result;
}

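// Byte arrays that exceed the regular heap object size limit are allocated
// in large object space; all others start out in new space.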
Object* Heap::AllocateByteArray(int length) {
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;

  Object* result = AllocateRaw(size, space);

  if (result->IsFailure()) return result;

  reinterpret_cast<Array*>(result)->set_map(byte_array_map());
  reinterpret_cast<Array*>(result)->set_length(length);
  return result;
}

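// Code objects need executable memory, so they are allocated in code space
// or, when too large for a regular heap object, through the large object
// space's code allocation path.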
Object* Heap::CreateCode(const CodeDesc& desc,
                         ScopeInfo<>* sinfo,
                         Code::Flags flags) {
  // Compute size
  int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment);
  int sinfo_size = 0;
  if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
  int obj_size = Code::SizeFor(body_size, sinfo_size);
  Object* result;
  if (obj_size > MaxHeapObjectSize()) {
    result = lo_space_->AllocateRawCode(obj_size);
  } else {
    result = code_space_->AllocateRaw(obj_size);
  }

  if (result->IsFailure()) return result;

  // Initialize the object
  HeapObject::cast(result)->set_map(code_map());
  Code* code = Code::cast(result);
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_size(desc.reloc_size);
  code->set_sinfo_size(sinfo_size);
  code->set_flags(flags);
  code->set_ic_flag(Code::IC_TARGET_IS_ADDRESS);
  code->CopyFrom(desc);  // migrate generated code
  if (sinfo != NULL) sinfo->Serialize(code);  // write scope info

#ifdef DEBUG
  code->Verify();
#endif
  return code;
}

Object* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  Object* result;
  if (obj_size > MaxHeapObjectSize()) {
    result = lo_space_->AllocateRawCode(obj_size);
  } else {
    result = code_space_->AllocateRaw(obj_size);
  }

  if (result->IsFailure()) return result;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  memcpy(new_addr, old_addr, obj_size);

  // Relocate the copy.
  Code* new_code = Code::cast(result);
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}

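// Allocates an uninitialized object of the size recorded in the map and
// installs the map; the caller is responsible for initializing the body.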
Object* Heap::Allocate(Map* map, AllocationSpace space) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
  Object* result = AllocateRaw(map->instance_size(), space);
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(map);
  return result;
}

Object* Heap::InitializeFunction(JSFunction* function,
                                 SharedFunctionInfo* shared,
                                 Object* prototype) {
  ASSERT(!prototype->IsMap());
  function->initialize_properties();
  function->initialize_elements();
  function->set_shared(shared);
  function->set_prototype_or_initial_map(prototype);
  function->set_context(undefined_value());
  function->set_literals(empty_fixed_array());
  return function;
}

Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
  // Allocate the prototype.
  Object* prototype =
      AllocateJSObject(Top::context()->global_context()->object_function());
  if (prototype->IsFailure()) return prototype;
  // When creating the prototype for the function we must set its
  // constructor to the function.
  Object* result =
      JSObject::cast(prototype)->SetProperty(constructor_symbol(),
                                             function,
                                             DONT_ENUM);
  if (result->IsFailure()) return result;
  return prototype;
}

Object* Heap::AllocateFunction(Map* function_map,
                               SharedFunctionInfo* shared,
                               Object* prototype) {
  Object* result = Allocate(function_map, OLD_POINTER_SPACE);
  if (result->IsFailure()) return result;
  return InitializeFunction(JSFunction::cast(result), shared, prototype);
}

Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
  // To get fast allocation and map sharing for arguments objects we
  // allocate them based on an arguments boilerplate.

  // This calls Copy directly rather than using Heap::AllocateRaw so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  JSObject* boilerplate =
      Top::context()->global_context()->arguments_boilerplate();
  Object* result = boilerplate->Copy();
  if (result->IsFailure()) return result;

  Object* obj = JSObject::cast(result)->properties();
  FixedArray::cast(obj)->set(arguments_callee_index, callee);
  FixedArray::cast(obj)->set(arguments_length_index, Smi::FromInt(length));

  // Allocate the fixed array.
  obj = Heap::AllocateFixedArray(length);
  if (obj->IsFailure()) return obj;
  JSObject::cast(result)->set_elements(FixedArray::cast(obj));

  // Check the state of the object
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastElements());

  return result;
}

Object* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map.
  Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
  if (map_obj->IsFailure()) return map_obj;

  // Fetch or allocate prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    prototype = AllocateFunctionPrototype(fun);
    if (prototype->IsFailure()) return prototype;
  }
  Map* map = Map::cast(map_obj);
  map->set_unused_property_fields(fun->shared()->expected_nof_properties());
  map->set_prototype(prototype);
  return map;
}

void Heap::InitializeJSObjectFromMap(JSObject* obj,
                                     FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map. For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
  // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
  // verification code has to cope with (temporarily) invalid objects
  // (see, for example, JSArray::JSArrayVerify).
  obj->InitializeBody(map->instance_size());
}

Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Allocate the backing storage for the properties.
  Object* properties = AllocateFixedArray(map->unused_property_fields());
  if (properties->IsFailure()) return properties;

  // Allocate the JSObject.
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE;
  Object* obj = Allocate(map, space);
  if (obj->IsFailure()) return obj;

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  return obj;
}

Object* Heap::AllocateJSObject(JSFunction* constructor,
                               PretenureFlag pretenure) {
  // Allocate the initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map = AllocateInitialMap(constructor);
    if (initial_map->IsFailure()) return initial_map;
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }
  // Allocate the object based on the constructor's initial map.
  return AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
}

Object* Heap::ReinitializeJSGlobalObject(JSFunction* constructor,
                                         JSGlobalObject* object) {
  // Allocate initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map = AllocateInitialMap(constructor);
    if (initial_map->IsFailure()) return initial_map;
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }

  Map* map = constructor->initial_map();

  // Check that the already allocated object has the same size as
  // objects allocated using the constructor.
  ASSERT(map->instance_size() == object->map()->instance_size());

  // Allocate the backing storage for the properties.
  Object* properties = AllocateFixedArray(map->unused_property_fields());
  if (properties->IsFailure()) return properties;

  // Reset the map for the object.
  object->set_map(constructor->initial_map());

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
  return object;
}

Object* Heap::AllocateStringFromAscii(Vector<const char> string,
                                      PretenureFlag pretenure) {
  Object* result = AllocateRawAsciiString(string.length(), pretenure);
  if (result->IsFailure()) return result;

  // Copy the characters into the new object.
  AsciiString* string_result = AsciiString::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->AsciiStringSet(i, string[i]);
  }
  return result;
}

Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
                                     PretenureFlag pretenure) {
  // Count the number of characters in the UTF-8 string and check if
  // it is an ASCII string.
  Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
  decoder->Reset(string.start(), string.length());
  int chars = 0;
  bool is_ascii = true;
  while (decoder->has_more()) {
    uc32 r = decoder->GetNext();
    if (r > String::kMaxAsciiCharCode) is_ascii = false;
    chars++;
  }

  // If the string is ASCII, we do not need to convert the characters
  // since UTF-8 is backwards compatible with ASCII.
  if (is_ascii) return AllocateStringFromAscii(string, pretenure);

  Object* result = AllocateRawTwoByteString(chars, pretenure);
  if (result->IsFailure()) return result;

  // Convert and copy the characters into the new object.
  String* string_result = String::cast(result);
  decoder->Reset(string.start(), string.length());
  for (int i = 0; i < chars; i++) {
    uc32 r = decoder->GetNext();
    string_result->Set(i, r);
  }
  return result;
}

Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
                                        PretenureFlag pretenure) {
  // Check if the string is an ASCII string.
  int i = 0;
  while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;

  Object* result;
  if (i == string.length()) {  // It's an ASCII string.
    result = AllocateRawAsciiString(string.length(), pretenure);
  } else {  // It's not an ASCII string.
    result = AllocateRawTwoByteString(string.length(), pretenure);
  }
  if (result->IsFailure()) return result;

  // Copy the characters into the new object, which may be either ASCII or
  // UTF-16.
  String* string_result = String::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->Set(i, string[i]);
  }
  return result;
}

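// Maps a string map to the symbol map with the same representation and
// length class, or returns NULL if the string cannot be converted to a
// symbol in place (for example, if it is in new space).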
Map* Heap::SymbolMapForString(String* string) {
  // If the string is in new space it cannot be used as a symbol.
  if (InNewSpace(string)) return NULL;

  // Find the corresponding symbol map for strings.
  Map* map = string->map();

  if (map == short_ascii_string_map()) return short_ascii_symbol_map();
  if (map == medium_ascii_string_map()) return medium_ascii_symbol_map();
  if (map == long_ascii_string_map()) return long_ascii_symbol_map();

  if (map == short_string_map()) return short_symbol_map();
  if (map == medium_string_map()) return medium_symbol_map();
  if (map == long_string_map()) return long_symbol_map();

  if (map == short_cons_string_map()) return short_cons_symbol_map();
  if (map == medium_cons_string_map()) return medium_cons_symbol_map();
  if (map == long_cons_string_map()) return long_cons_symbol_map();

  if (map == short_cons_ascii_string_map()) {
    return short_cons_ascii_symbol_map();
  }
  if (map == medium_cons_ascii_string_map()) {
    return medium_cons_ascii_symbol_map();
  }
  if (map == long_cons_ascii_string_map()) {
    return long_cons_ascii_symbol_map();
  }

  if (map == short_sliced_string_map()) return short_sliced_symbol_map();
  if (map == medium_sliced_string_map()) return medium_sliced_symbol_map();
  if (map == long_sliced_string_map()) return long_sliced_symbol_map();

  if (map == short_sliced_ascii_string_map()) {
    return short_sliced_ascii_symbol_map();
  }
  if (map == medium_sliced_ascii_string_map()) {
    return medium_sliced_ascii_symbol_map();
  }
  if (map == long_sliced_ascii_string_map()) {
    return long_sliced_ascii_symbol_map();
  }

  if (map == short_external_string_map()) return short_external_string_map();
  if (map == medium_external_string_map()) return medium_external_string_map();
  if (map == long_external_string_map()) return long_external_string_map();

  if (map == short_external_ascii_string_map()) {
    return short_external_ascii_string_map();
  }
  if (map == medium_external_ascii_string_map()) {
    return medium_external_ascii_string_map();
  }
  if (map == long_external_ascii_string_map()) {
    return long_external_ascii_string_map();
  }

  // No match found.
  return NULL;
}

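// Allocates a symbol directly in old data space (or large object space when
// oversized), choosing an ASCII or two-byte representation based on the
// characters in the buffer.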
Object* Heap::AllocateSymbol(unibrow::CharacterStream* buffer,
                             int chars,
                             int hash) {
  // Ensure that chars matches the number of characters in the buffer.
  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
  // Determine whether the string is ASCII.
  bool is_ascii = true;
  while (buffer->has_more()) {
    if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) is_ascii = false;
  }
  buffer->Rewind();

  // Compute map and object size.
  int size;
  Map* map;

  if (is_ascii) {
    if (chars <= String::kMaxShortStringSize) {
      map = short_ascii_symbol_map();
    } else if (chars <= String::kMaxMediumStringSize) {
      map = medium_ascii_symbol_map();
    } else {
      map = long_ascii_symbol_map();
    }
    size = AsciiString::SizeFor(chars);
  } else {
    if (chars <= String::kMaxShortStringSize) {
      map = short_symbol_map();
    } else if (chars <= String::kMaxMediumStringSize) {
      map = medium_symbol_map();
    } else {
      map = long_symbol_map();
    }
    size = TwoByteString::SizeFor(chars);
  }

  // Allocate string.
  AllocationSpace space =
      (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
  Object* result = AllocateRaw(size, space);
  if (result->IsFailure()) return result;

  reinterpret_cast<HeapObject*>(result)->set_map(map);
  // The hash value contains the length of the string.
  String::cast(result)->set_length_field(hash);

  ASSERT_EQ(size, String::cast(result)->Size());

  // Fill in the characters.
  for (int i = 0; i < chars; i++) {
    String::cast(result)->Set(i, buffer->GetNext());
  }
  return result;
}

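// Allocates the backing store for a sequential ASCII string and installs a
// map chosen from the length class; the characters themselves are left for
// the caller to fill in.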
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  int size = AsciiString::SizeFor(length);
  if (size > MaxHeapObjectSize()) {
    space = LO_SPACE;
  }

  // Use AllocateRaw rather than Allocate because the object's size cannot be
  // determined from the map.
  Object* result = AllocateRaw(size, space);
  if (result->IsFailure()) return result;

  // Determine the map based on the string's length.
  Map* map;
  if (length <= String::kMaxShortStringSize) {
    map = short_ascii_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = medium_ascii_string_map();
  } else {
    map = long_ascii_string_map();
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(map);
  String::cast(result)->set_length(length);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}

Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  int size = TwoByteString::SizeFor(length);
  if (size > MaxHeapObjectSize()) {
    space = LO_SPACE;
  }

  // Use AllocateRaw rather than Allocate because the object's size cannot be
  // determined from the map.
  Object* result = AllocateRaw(size, space);
  if (result->IsFailure()) return result;

  // Determine the map based on the string's length.
  Map* map;
  if (length <= String::kMaxShortStringSize) {
    map = short_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = medium_string_map();
  } else {
    map = long_string_map();
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(map);
  String::cast(result)->set_length(length);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}

Object* Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  Object* result = AllocateRaw(size, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;
  // Initialize the object.
  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
  reinterpret_cast<Array*>(result)->set_length(0);
  return result;
}

Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
  ASSERT(empty_fixed_array()->IsFixedArray());
  if (length == 0) return empty_fixed_array();

  int size = FixedArray::SizeFor(length);
  Object* result;
  if (size > MaxHeapObjectSize()) {
    result = lo_space_->AllocateRawFixedArray(size);
  } else {
    AllocationSpace space =
        (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
    result = AllocateRaw(size, space);
  }
  if (result->IsFailure()) return result;

  // Initialize the object.
  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  for (int index = 0; index < length; index++) array->set_undefined(index);
  return array;
}

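// Like AllocateFixedArray, but fills the array with the hole value instead
// of undefined and always allocates in new space (or large object space),
// without a pretenure flag.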
Object* Heap::AllocateFixedArrayWithHoles(int length) {
  if (length == 0) return empty_fixed_array();
  int size = FixedArray::SizeFor(length);
  Object* result = size > MaxHeapObjectSize()
      ? lo_space_->AllocateRawFixedArray(size)
      : AllocateRaw(size, NEW_SPACE);
  if (result->IsFailure()) return result;

  // Initialize the object.
  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  for (int index = 0; index < length; index++) array->set_the_hole(index);
  return array;
}

Object* Heap::AllocateHashTable(int length) {
  Object* result = Heap::AllocateFixedArray(length);
  if (result->IsFailure()) return result;
  reinterpret_cast<Array*>(result)->set_map(hash_table_map());
  ASSERT(result->IsDictionary());
  return result;
}

Object* Heap::AllocateGlobalContext() {
  Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
  if (result->IsFailure()) return result;
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(global_context_map());
  ASSERT(context->IsGlobalContext());
  ASSERT(result->IsContext());
  return result;
}

Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
  Object* result = Heap::AllocateFixedArray(length);
  if (result->IsFailure()) return result;
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(context_map());
  context->set_closure(function);
  context->set_fcontext(context);
  context->set_previous(NULL);
  context->set_extension(NULL);
  context->set_global(function->context()->global());
  ASSERT(!context->IsGlobalContext());
  ASSERT(context->is_function_context());
  ASSERT(result->IsContext());
  return result;
}

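// A 'with' context shares the function context (fcontext) of its
// predecessor and records the extension object introduced by the with
// statement.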
Object* Heap::AllocateWithContext(Context* previous, JSObject* extension) {
  Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
  if (result->IsFailure()) return result;
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(context_map());
  context->set_closure(previous->closure());
  context->set_fcontext(previous->fcontext());
  context->set_previous(previous);
  context->set_extension(extension);
  context->set_global(previous->global());
  ASSERT(!context->IsGlobalContext());
  ASSERT(!context->is_function_context());
  ASSERT(result->IsContext());
  return result;
}

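// Allocates one of the struct types in STRUCT_LIST, placing it in old
// pointer space unless it exceeds the regular heap object size limit.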
Object* Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return Failure::InternalError();
  }
  int size = map->instance_size();
  AllocationSpace space =
      (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_POINTER_SPACE;
  Object* result = Heap::Allocate(map, space);
  if (result->IsFailure()) return result;
  Struct::cast(result)->InitializeBody(size);
  return result;
}


#ifdef DEBUG

void Heap::Print() {
  if (!HasBeenSetup()) return;
  Top::PrintStack();
  AllSpaces spaces;
  while (Space* space = spaces.next()) space->Print();
}

void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics();
  // We do not look for code in new space, map space, or old space. If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics();
}

// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
         title, gc_count_);
  PrintF("mark-compact GC : %d\n", mc_count_);
  PrintF("promoted_space_limit_ %d\n", promoted_space_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
  GlobalHandles::PrintStats();
  PrintF("\n");

  PrintF("Heap statistics : ");
  MemoryAllocator::ReportStatistics();
  PrintF("To space : ");
  new_space_->ReportStatistics();
  PrintF("Old pointer space : ");
  old_pointer_space_->ReportStatistics();
  PrintF("Old data space : ");
  old_data_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}

#endif // DEBUG

bool Heap::Contains(HeapObject* value) {
  return Contains(value->address());
}

bool Heap::Contains(Address addr) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  return HasBeenSetup() &&
         (new_space_->ToSpaceContains(addr) ||
          old_pointer_space_->Contains(addr) ||
          old_data_space_->Contains(addr) ||
          code_space_->Contains(addr) ||
          map_space_->Contains(addr) ||
          lo_space_->SlowContains(addr));
}

bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  return InSpace(value->address(), space);
}

bool Heap::InSpace(Address addr, AllocationSpace space) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  if (!HasBeenSetup()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_->ToSpaceContains(addr);
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Contains(addr);
    case OLD_DATA_SPACE:
      return old_data_space_->Contains(addr);
    case CODE_SPACE:
      return code_space_->Contains(addr);
    case MAP_SPACE:
      return map_space_->Contains(addr);
    case LO_SPACE:
      return lo_space_->SlowContains(addr);
  }

  return false;
}

#ifdef DEBUG
void Heap::Verify() {
  ASSERT(HasBeenSetup());

  VerifyPointersVisitor visitor;
  Heap::IterateRoots(&visitor);

  AllSpaces spaces;
  while (Space* space = spaces.next()) {
    space->Verify();
  }
}
#endif // DEBUG

Object* Heap::LookupSymbol(Vector<const char> string) {
  Object* symbol = NULL;
  Object* new_table =
      SymbolTable::cast(symbol_table_)->LookupSymbol(string, &symbol);
  if (new_table->IsFailure()) return new_table;
  symbol_table_ = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}

Object* Heap::LookupSymbol(String* string) {
  if (string->IsSymbol()) return string;
  Object* symbol = NULL;
  Object* new_table =
      SymbolTable::cast(symbol_table_)->LookupString(string, &symbol);
  if (new_table->IsFailure()) return new_table;
  symbol_table_ = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}

Object* Heap::LookupEvalCache(bool is_global_context, String* src) {
  Object* cache = is_global_context ?
      eval_cache_global_ : eval_cache_non_global_;
  return cache == null_value() ?
      null_value() : EvalCache::cast(cache)->Lookup(src);
}

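// Inserts a compiled eval function into the cache for the given context
// kind, allocating the cache lazily the first time it is needed.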
Object* Heap::PutInEvalCache(bool is_global_context, String* src,
                             JSFunction* value) {
  Object** cache_ptr = is_global_context ?
      &eval_cache_global_ : &eval_cache_non_global_;

  if (*cache_ptr == null_value()) {
    Object* obj = EvalCache::Allocate(kInitialEvalCacheSize);
    if (obj->IsFailure()) return obj;
    *cache_ptr = obj;
  }

  Object* new_cache =
      EvalCache::cast(*cache_ptr)->Put(src, value);
  if (new_cache->IsFailure()) return new_cache;
  *cache_ptr = new_cache;

  return value;
}

#ifdef DEBUG
void Heap::ZapFromSpace() {
  ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
  for (Address a = new_space_->FromSpaceLow();
       a < new_space_->FromSpaceHigh();
       a += kPointerSize) {
    Memory::Address_at(a) = kFromSpaceZapValue;
  }
}
#endif // DEBUG

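// Walks the remembered set bits covering [object_start, object_end),
// invoking copy_object_func for slots that point into from space and
// clearing bits whose slots no longer point into to space.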
void Heap::IterateRSetRange(Address object_start,
                            Address object_end,
                            Address rset_start,
                            ObjectSlotCallback copy_object_func) {
  Address object_address = object_start;
  Address rset_address = rset_start;

  // Loop over all the pointers in [object_start, object_end).
  while (object_address < object_end) {
    uint32_t rset_word = Memory::uint32_at(rset_address);

    if (rset_word != 0) {
      // Bits were set.
      uint32_t result_rset = rset_word;

      // Loop over all the bits in the remembered set word. Though
      // remembered sets are sparse, faster (eg, binary) search for
      // set bits does not seem to help much here.
      for (int bit_offset = 0; bit_offset < kBitsPerInt; bit_offset++) {
        uint32_t bitmask = 1 << bit_offset;
        // Do not dereference pointers at or past object_end.
        if ((rset_word & bitmask) != 0 && object_address < object_end) {
          Object** object_p = reinterpret_cast<Object**>(object_address);
          if (Heap::InFromSpace(*object_p)) {
            copy_object_func(reinterpret_cast<HeapObject**>(object_p));
          }
          // If this pointer does not need to be remembered anymore, clear
          // the remembered set bit.
          if (!Heap::InToSpace(*object_p)) result_rset &= ~bitmask;
        }
        object_address += kPointerSize;
      }

      // Update the remembered set if it has changed.
      if (result_rset != rset_word) {
        Memory::uint32_at(rset_address) = result_rset;
      }
    } else {
      // No bits in the word were set. This is the common case.
      object_address += kPointerSize * kBitsPerInt;
    }

    rset_address += kIntSize;
  }
}

void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
  ASSERT(Page::is_rset_in_use());
  ASSERT(space == old_pointer_space_ || space == map_space_);

  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* page = it.next();
    IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
                     page->RSetStart(), copy_object_func);
  }
}

#ifdef DEBUG
#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
#else
#define SYNCHRONIZE_TAG(tag)
#endif

void Heap::IterateRoots(ObjectVisitor* v) {
  IterateStrongRoots(v);
  v->VisitPointer(reinterpret_cast<Object**>(&symbol_table_));
  SYNCHRONIZE_TAG("symbol_table");
}

2394
2395void Heap::IterateStrongRoots(ObjectVisitor* v) {
2396#define ROOT_ITERATE(type, name) \
2397 v->VisitPointer(reinterpret_cast<Object**>(&name##_));
2398 STRONG_ROOT_LIST(ROOT_ITERATE);
2399#undef ROOT_ITERATE
2400 SYNCHRONIZE_TAG("strong_root_list");
2401
2402#define STRUCT_MAP_ITERATE(NAME, Name, name) \
2403 v->VisitPointer(reinterpret_cast<Object**>(&name##_map_));
2404 STRUCT_LIST(STRUCT_MAP_ITERATE);
2405#undef STRUCT_MAP_ITERATE
2406 SYNCHRONIZE_TAG("struct_map");
2407
2408#define SYMBOL_ITERATE(name, string) \
2409 v->VisitPointer(reinterpret_cast<Object**>(&name##_));
2410 SYMBOL_LIST(SYMBOL_ITERATE)
2411#undef SYMBOL_ITERATE
2412 SYNCHRONIZE_TAG("symbol");
2413
2414 Bootstrapper::Iterate(v);
2415 SYNCHRONIZE_TAG("bootstrapper");
2416 Top::Iterate(v);
2417 SYNCHRONIZE_TAG("top");
2418 Debug::Iterate(v);
2419 SYNCHRONIZE_TAG("debug");
2420
2421 // Iterate over local handles in handle scopes.
2422 HandleScopeImplementer::Iterate(v);
2423 SYNCHRONIZE_TAG("handlescope");
2424
2425 // Iterate over the builtin code objects and code stubs in the heap. Note
2426 // that it is not strictly necessary to iterate over code objects on
2427 // scavenge collections. We still do it here because this same function
2428 // is used by the mark-sweep collector and the deserializer.
2429 Builtins::IterateBuiltins(v);
2430 SYNCHRONIZE_TAG("builtins");
2431
2432 // Iterate over global handles.
2433 GlobalHandles::IterateRoots(v);
2434 SYNCHRONIZE_TAG("globalhandles");
2435
2436 // Iterate over pointers being held by inactive threads.
2437 ThreadManager::Iterate(v);
2438 SYNCHRONIZE_TAG("threadmanager");
2439}
2440#undef SYNCHRONIZE_TAG
2441
2442
// Flag is set when the heap has been configured. The heap can be repeatedly
// configured through the API until it is set up.
static bool heap_configured = false;

// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
  if (HasBeenSetup()) return false;

  if (semispace_size > 0) semispace_size_ = semispace_size;
  if (old_gen_size > 0) old_generation_size_ = old_gen_size;

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  semispace_size_ = RoundUpToPowerOf2(semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
  young_generation_size_ = 2 * semispace_size_;

  // The old generation is paged.
  old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);

  heap_configured = true;
  return true;
}

bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
}

int Heap::PromotedSpaceSize() {
  return old_pointer_space_->Size()
      + old_data_space_->Size()
      + code_space_->Size()
      + map_space_->Size()
      + lo_space_->Size();
}

int Heap::PromotedExternalMemorySize() {
  if (amount_of_external_allocated_memory_
      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
  return amount_of_external_allocated_memory_
      - amount_of_external_allocated_memory_at_last_global_gc_;
}

bool Heap::Setup(bool create_heap_objects) {
  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (eg, through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set, or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!heap_configured) {
    if (!ConfigureHeapDefault()) return false;
  }

  // Set up the memory allocator and allocate an initial chunk of memory. The
  // initial chunk is double the size of the new space to ensure that we can
  // find a pair of semispaces that are contiguous and aligned to their size.
  if (!MemoryAllocator::Setup(MaxCapacity())) return false;
  void* chunk
      = MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
  if (chunk == NULL) return false;

  // Put the initial chunk of the old space at the start of the initial
  // chunk, then the two new space semispaces, then the initial chunk of
  // code space. Align the pair of semispaces to their size, which must be
  // a power of 2.
  ASSERT(IsPowerOf2(young_generation_size_));
  Address old_space_start = reinterpret_cast<Address>(chunk);
  Address new_space_start = RoundUp(old_space_start, young_generation_size_);
  Address code_space_start = new_space_start + young_generation_size_;
  int old_space_size = new_space_start - old_space_start;
  int code_space_size = young_generation_size_ - old_space_size;

  // Initialize new space.
  new_space_ = new NewSpace(initial_semispace_size_,
                            semispace_size_,
                            NEW_SPACE);
  if (new_space_ == NULL) return false;
  if (!new_space_->Setup(new_space_start, young_generation_size_)) return false;

  // Initialize old space, set the maximum capacity to the old generation
  // size. It will not contain code.
  old_pointer_space_ =
      new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->Setup(old_space_start, old_space_size >> 1)) {
    return false;
  }
  old_data_space_ =
      new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->Setup(old_space_start + (old_space_size >> 1),
                              old_space_size >> 1)) {
    return false;
  }

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  code_space_ =
      new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->Setup(code_space_start, code_space_size)) return false;

  // Initialize map space.
  map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
  if (map_space_ == NULL) return false;
  // Setting up a paged space without giving it a virtual memory range big
  // enough to hold at least a page will cause it to allocate.
  if (!map_space_->Setup(NULL, 0)) return false;

  // The large object space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->Setup()) return false;

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects.
    if (!CreateInitialObjects()) return false;
  }

  LOG(IntEvent("heap-capacity", Capacity()));
  LOG(IntEvent("heap-available", Available()));

  return true;
}

void Heap::TearDown() {
  GlobalHandles::TearDown();

  if (new_space_ != NULL) {
    new_space_->TearDown();
    delete new_space_;
    new_space_ = NULL;
  }

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  MemoryAllocator::TearDown();
}

void Heap::Shrink() {
  // Try to shrink map, old, and code spaces.
  map_space_->Shrink();
  old_pointer_space_->Shrink();
  old_data_space_->Shrink();
  code_space_->Shrink();
}

#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF(" handle %p to %p\n", p, *p);
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  HandleScopeImplementer::Iterate(&v);
}

#endif

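// The space iterators below return each space in AllocationSpace order and
// then NULL, so callers can walk all spaces with a simple while loop.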
Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return Heap::new_space();
    case OLD_POINTER_SPACE:
      return Heap::old_pointer_space();
    case OLD_DATA_SPACE:
      return Heap::old_data_space();
    case CODE_SPACE:
      return Heap::code_space();
    case MAP_SPACE:
      return Heap::map_space();
    case LO_SPACE:
      return Heap::lo_space();
    default:
      return NULL;
  }
}

PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return Heap::old_pointer_space();
    case OLD_DATA_SPACE:
      return Heap::old_data_space();
    case CODE_SPACE:
      return Heap::code_space();
    case MAP_SPACE:
      return Heap::map_space();
    default:
      return NULL;
  }
}

OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return Heap::old_pointer_space();
    case OLD_DATA_SPACE:
      return Heap::old_data_space();
    case CODE_SPACE:
      return Heap::code_space();
    default:
      return NULL;
  }
}

SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}

bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}

ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}

// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(Heap::new_space());
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_data_space());
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(Heap::code_space());
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(Heap::map_space());
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(Heap::lo_space());
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}

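// HeapIterator visits every object in the heap by chaining together the
// per-space object iterators produced by SpaceIterator.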
HeapIterator::HeapIterator() {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator();
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
}

bool HeapIterator::has_next() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return false;

  if (object_iterator_->has_next_object()) {
    // If the current iterator has more objects we are fine.
    return true;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (object_iterator_->has_next_object()) {
        return true;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return false;
}

HeapObject* HeapIterator::next() {
  if (has_next()) {
    return object_iterator_->next_object();
  } else {
    return NULL;
  }
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}

//
// HeapProfiler class implementation.
//
#ifdef ENABLE_LOGGING_AND_PROFILING
void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  info[type].increment_number(1);
  info[type].increment_bytes(obj->Size());
}
#endif

#ifdef ENABLE_LOGGING_AND_PROFILING
void HeapProfiler::WriteSample() {
  LOG(HeapSampleBeginEvent("Heap", "allocated"));

  HistogramInfo info[LAST_TYPE+1];
#define DEF_TYPE_NAME(name) info[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

  HeapIterator iterator;
  while (iterator.has_next()) {
    CollectStats(iterator.next(), info);
  }

  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT_SIZE(type, size, name)  \
    string_number += info[type].number(); \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT_SIZE)
#undef INCREMENT_SIZE
  if (string_bytes > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].bytes() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }

  LOG(HeapSampleEndEvent("Heap", "allocated"));
}


#endif

#ifdef DEBUG

static bool search_for_any_global;
static Object* search_target;
static bool found_target;
static List<Object*> object_stack(20);


// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;

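// An object is marked by adding kMarkTag to its map pointer, so a marked
// object's map word no longer looks like a heap object; unmarking subtracts
// the tag to restore the original map.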
static void MarkObjectRecursively(Object** p);
class MarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Mark all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkObjectRecursively(p);
    }
  }
};

static MarkObjectVisitor mark_visitor;

static void MarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target) return;  // stop if target found
  object_stack.Add(obj);
  if ((search_for_any_global && obj->IsJSGlobalObject()) ||
      (!search_for_any_global && (obj == search_target))) {
    found_target = true;
    return;
  }

  if (obj->IsCode()) {
    Code::cast(obj)->ConvertICTargetsFromAddressToObject();
  }

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  MarkObjectRecursively(&map);

  obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                   &mark_visitor);

  if (!found_target)  // don't pop if found the target
    object_stack.RemoveLast();
}

static void UnmarkObjectRecursively(Object** p);
class UnmarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Unmark all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        UnmarkObjectRecursively(p);
    }
  }
};

static UnmarkObjectVisitor unmark_visitor;

static void UnmarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   &unmark_visitor);

  if (obj->IsCode()) {
    Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
  }
}

static void MarkRootObjectRecursively(Object** root) {
  if (search_for_any_global) {
    ASSERT(search_target == NULL);
  } else {
    ASSERT(search_target->IsHeapObject());
  }
  found_target = false;
  object_stack.Clear();

  MarkObjectRecursively(root);
  UnmarkObjectRecursively(root);

  if (found_target) {
    PrintF("=====================================\n");
    PrintF("====　　　Path to object　　　====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack.is_empty());
    for (int i = 0; i < object_stack.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}

// Helper class for visiting HeapObjects recursively.
class MarkRootVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkRootObjectRecursively(p);
    }
  }
};

// Triggers a depth-first traversal of reachable objects from the roots,
// finds a path to a specific heap object, and prints it.
void Heap::TracePathToObject() {
  search_target = NULL;
  search_for_any_global = false;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor);
}

// Triggers a depth-first traversal of reachable objects from the roots,
// finds a path to any global object, and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  search_target = NULL;
  search_for_any_global = true;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor);
}
#endif

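// GCTracer records the heap size and time at construction and, when
// --trace-gc is set, prints a one-line summary of the collection when it
// goes out of scope.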
GCTracer::GCTracer()
    : start_time_(0.0),
      start_size_(0.0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
  if (!FLAG_trace_gc) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = SizeOfHeapObjects();
}

GCTracer::~GCTracer() {
  if (!FLAG_trace_gc) return;
  // Printf ONE line iff flag is set.
  PrintF("%s %.1f -> %.1f MB, %d ms.\n",
         CollectorString(),
         start_size_, SizeOfHeapObjects(),
         static_cast<int>(OS::TimeCurrentMillis() - start_time_));
}

const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return MarkCompactCollector::HasCompacted() ? "Mark-compact"
                                                  : "Mark-sweep";
  }
  return "Unknown GC";
}

} } // namespace v8::internal