blob: 772cf329c7c70d108c587d77402ff94b2a3c6ca8 [file] [log] [blame]
ager@chromium.org71daaf62009-04-01 07:22:49 +00001// Copyright 2009 the V8 project authors. All rights reserved.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "accessors.h"
31#include "api.h"
32#include "bootstrapper.h"
33#include "codegen-inl.h"
kasperl@chromium.orgb9123622008-09-17 14:05:56 +000034#include "compilation-cache.h"
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000035#include "debug.h"
36#include "global-handles.h"
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000037#include "mark-compact.h"
38#include "natives.h"
39#include "scanner.h"
40#include "scopeinfo.h"
41#include "v8threads.h"
42
kasperl@chromium.org71affb52009-05-26 05:44:31 +000043namespace v8 {
44namespace internal {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000045
// Definitions of the Heap class's static members (declared in heap.h).
// The ROOT_LIST/STRUCT_LIST/SYMBOL_LIST macros expand to one static
// pointer per heap root, struct map, and pre-interned symbol.
#define ROOT_ALLOCATION(type, name) type* Heap::name##_;
  ROOT_LIST(ROOT_ALLOCATION)
#undef ROOT_ALLOCATION


#define STRUCT_ALLOCATION(NAME, Name, name) Map* Heap::name##_map_;
  STRUCT_LIST(STRUCT_ALLOCATION)
#undef STRUCT_ALLOCATION


#define SYMBOL_ALLOCATION(name, string) String* Heap::name##_;
  SYMBOL_LIST(SYMBOL_ALLOCATION)
#undef SYMBOL_ALLOCATION

String* Heap::hidden_symbol_;

// The spaces making up the heap.  new_space_ is held by value; the paged
// spaces are heap-allocated during Heap setup (NULL until then — see
// HasBeenSetup()).
NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

// Floors for the old-generation limits recomputed after each mark-compact
// in PerformGarbageCollection().
static const int kMinimumPromotionLimit = 2*MB;
static const int kMinimumAllocationLimit = 8*MB;

int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

// Set when allocation in OLD/LO spaces has failed; biases the collector
// choice toward mark-compact.  (Declared int but used as a boolean flag.)
int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
int Heap::semispace_size_ = 2*MB;
int Heap::old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 256*KB;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.

// Double the new space after this many scavenge collections.
int Heap::new_space_growth_limit_ = 8;
int Heap::scavenge_count_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

// Collection counters: mc_count_ counts mark-compacts only, gc_count_
// counts all collections (incremented in GarbageCollectionPrologue).
int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;

int Heap::always_allocate_scope_depth_ = 0;
bool Heap::context_disposed_pending_ = false;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

// Number of allocations before a forced GC failure (see CollectGarbage).
int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG
110
111
112int Heap::Capacity() {
113 if (!HasBeenSetup()) return 0;
114
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000115 return new_space_.Capacity() +
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000116 old_pointer_space_->Capacity() +
117 old_data_space_->Capacity() +
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000118 code_space_->Capacity() +
119 map_space_->Capacity();
120}
121
122
123int Heap::Available() {
124 if (!HasBeenSetup()) return 0;
125
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000126 return new_space_.Available() +
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000127 old_pointer_space_->Available() +
128 old_data_space_->Available() +
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000129 code_space_->Available() +
130 map_space_->Available();
131}
132
133
134bool Heap::HasBeenSetup() {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000135 return old_pointer_space_ != NULL &&
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000136 old_data_space_ != NULL &&
137 code_space_ != NULL &&
138 map_space_ != NULL &&
139 lo_space_ != NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000140}
141
142
// Chooses the collector for a GC triggered by an allocation failure in
// 'space'.  New-space failures normally get the cheap SCAVENGER; anything
// that suggests old-generation pressure escalates to MARK_COMPACTOR.
// Each escalation path bumps a dedicated counter for diagnostics.
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}
179
180
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Dumps heap/new-space statistics before a collection, guarding against
// reporting the same NewSpace numbers twice when both --heap-stats and
// --log-gc apply.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
// Counterpart of ReportStatisticsBeforeGC, run after the collection.
// Histograms were already collected/cleared before the GC, so only
// reporting happens here.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
230
231
// Run at the start of every collection: bumps the global GC counter,
// disables allocation while the GC runs (debug builds), and emits any
// requested pre-GC diagnostics.
void Heap::GarbageCollectionPrologue() {
  gc_count_++;
#ifdef DEBUG
  // A GC must not start while another is in progress or while allocation
  // is disallowed.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();

  if (FLAG_print_rset) {
    // Not all spaces have remembered set bits that we care about.
    old_pointer_space_->PrintRSet();
    map_space_->PrintRSet();
    lo_space_->PrintRSet();
  }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}
256
257int Heap::SizeOfObjects() {
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000258 int total = 0;
259 AllSpaces spaces;
260 while (Space* space = spaces.next()) total += space->Size();
261 return total;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000262}
263
// Run at the end of every collection: re-enables allocation, updates
// post-GC counters/statistics, and notifies the debugger.
void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  // Overwrite dead from-space memory so stale pointers are caught early.
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(SizeOfObjects());

  // Publish symbol table size/occupancy to the counters.
  SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table_);
  Counters::symbol_table_capacity.Set(symbol_table->Capacity());
  Counters::number_of_symbols.Set(symbol_table->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::AfterGarbageCollection();
#endif
}
291
292
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000293void Heap::CollectAllGarbage() {
294 // Since we are ignoring the return value, the exact choice of space does
295 // not matter, so long as we do not specify NEW_SPACE, which would not
296 // cause a full GC.
297 CollectGarbage(0, OLD_POINTER_SPACE);
298}
299
300
kasperl@chromium.org061ef742009-02-27 12:16:20 +0000301void Heap::CollectAllGarbageIfContextDisposed() {
kasperl@chromium.orgd55d36b2009-03-05 08:03:28 +0000302 // If the garbage collector interface is exposed through the global
303 // gc() function, we avoid being clever about forcing GCs when
304 // contexts are disposed and leave it to the embedder to make
305 // informed decisions about when to force a collection.
306 if (!FLAG_expose_gc && context_disposed_pending_) {
ager@chromium.orgbb29dc92009-03-24 13:25:23 +0000307 HistogramTimerScope scope(&Counters::gc_context);
kasperl@chromium.org061ef742009-02-27 12:16:20 +0000308 CollectAllGarbage();
kasperl@chromium.org061ef742009-02-27 12:16:20 +0000309 }
kasperl@chromium.orgd55d36b2009-03-05 08:03:28 +0000310 context_disposed_pending_ = false;
kasperl@chromium.org061ef742009-02-27 12:16:20 +0000311}
312
313
// Records that a context has been disposed; acted on by the next call to
// CollectAllGarbageIfContextDisposed().
void Heap::NotifyContextDisposed() {
  context_disposed_pending_ = true;
}
317
318
// Main GC entry point for allocation failures: selects and runs a
// collector, then reports whether 'space' now has at least
// 'requested_size' bytes available.
bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  // Scope the tracer so it covers exactly the prologue/collect/epilogue
  // sequence.
  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    GarbageCollector collector = SelectGarbageCollector(space);
    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    // Time the collection with the histogram matching the collector kind.
    HistogramTimer* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    PerformGarbageCollection(space, collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  // Report whether the requested allocation can now succeed in 'space'.
  switch (space) {
    case NEW_SPACE:
      return new_space_.Available() >= requested_size;
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Available() >= requested_size;
    case OLD_DATA_SPACE:
      return old_data_space_->Available() >= requested_size;
    case CODE_SPACE:
      return code_space_->Available() >= requested_size;
    case MAP_SPACE:
      return map_space_->Available() >= requested_size;
    case LO_SPACE:
      return lo_space_->Available() >= requested_size;
  }
  return false;
}
373
374
// Runs a scavenge (new-space) collection directly, bypassing collector
// selection, with a locally scoped tracer.
void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
}
379
380
#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  SymbolTableVerifier() { }
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.  Null/undefined are
        // also allowed (empty or deleted table entries).
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


// Debug-build check that every element of the symbol table really is a
// symbol (or a null/undefined hole).  No-op in release builds.
static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table());
  symbol_table->IterateElements(&verifier);
#endif  // DEBUG
}
406
407
// Performs the actual collection with the already-selected collector.
// For mark-compact GCs this also runs the embedder's global GC callbacks,
// recomputes the old-generation growth limits, and snapshots the external
// memory counter.
void Heap::PerformGarbageCollection(AllocationSpace space,
                                    GarbageCollector collector,
                                    GCTracer* tracer) {
  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    global_gc_prologue_callback_();
  }

  if (collector == MARK_COMPACTOR) {
    MarkCompact(tracer);

    // Grow the old-generation limits to current size + one third (with a
    // fixed minimum head-room) so the next full GC is not immediate.
    int old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3);
    old_gen_exhausted_ = false;

    // If we have used the mark-compact collector to collect the new
    // space, and it has not compacted the new space, we force a
    // separate scavenge collection.  This is a hack.  It covers the
    // case where (1) a new space collection was requested, (2) the
    // collector selection policy selected the mark-compact collector,
    // and (3) the mark-compact collector policy selected not to
    // compact the new space.  In that case, there is no more (usable)
    // free space in the new space after the collection compared to
    // before.
    if (space == NEW_SPACE && !MarkCompactCollector::HasCompacted()) {
      Scavenge();
    }
  } else {
    Scavenge();
  }
  Counters::objs_since_last_young.Set(0);

  PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();
}
458
459
// Work that must happen after every collection, regardless of collector.
void Heap::PostGarbageCollectionProcessing() {
  // Process weak handles post gc.
  GlobalHandles::PostGarbageCollectionProcessing();
  // Update flat string readers.
  FlatStringReader::PostGarbageCollectionProcessing();
}
466
467
// Runs a full mark-compact collection: prepare, prologue, collect,
// epilogue, then shrink the spaces and reset the per-full-GC counters.
void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  mc_count_++;
  tracer->set_full_gc_count(mc_count_);
  LOG(ResourceEvent("markcompact", "begin"));

  // Prepare() decides whether this collection will compact; the decision
  // is read back below and passed to the prologue/epilogue.
  MarkCompactCollector::Prepare(tracer);

  bool is_compacting = MarkCompactCollector::IsCompacting();

  MarkCompactPrologue(is_compacting);

  MarkCompactCollector::CollectGarbage();

  MarkCompactEpilogue(is_compacting);

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  // Return now-unused memory to the OS.
  Shrink();

  Counters::objs_since_last_full.Set(0);
  // A full GC subsumes the collection a disposed context would trigger.
  context_disposed_pending_ = false;
}
493
494
// Clears caches and notifies subsystems before the mark-compact collector
// runs; 'is_compacting' tells them whether objects will move.
void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  ClearKeyedLookupCache();

  CompilationCache::MarkCompactPrologue();

  Top::MarkCompactPrologue(is_compacting);
  ThreadManager::MarkCompactPrologue(is_compacting);
}
505
506
// Counterpart of MarkCompactPrologue, run after the collector finishes.
void Heap::MarkCompactEpilogue(bool is_compacting) {
  Top::MarkCompactEpilogue(is_compacting);
  ThreadManager::MarkCompactEpilogue(is_compacting);
}
511
512
513Object* Heap::FindCodeObject(Address a) {
514 Object* obj = code_space_->FindObject(a);
515 if (obj->IsFailure()) {
516 obj = lo_space_->FindObject(a);
517 }
kasper.lund7276f142008-07-30 08:49:36 +0000518 ASSERT(!obj->IsFailure());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000519 return obj;
520}
521
522
// Helper class for copying HeapObjects: visits pointer slots and scavenges
// (copies or promotes) any referent still in new space.
class ScavengeVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  // Scavenges the object *p points to if it lives in new space; smis and
  // old-space objects are left untouched.
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};
542
543
// A queue of pointers and maps of to-be-promoted objects during a
// scavenge collection.  Backed by the (otherwise empty) top end of the to
// space: entries are pushed downward from the high address, so the queue
// grows toward the allocation pointer.
class PromotionQueue {
 public:
  void Initialize(Address start_address) {
    front_ = rear_ = reinterpret_cast<HeapObject**>(start_address);
  }

  // Empty when the read cursor has caught up with the write cursor.
  bool is_empty() { return front_ <= rear_; }

  // Pushes an (object, map) pair; each entry occupies two slots.
  void insert(HeapObject* object, Map* map) {
    *(--rear_) = object;
    *(--rear_) = map;
    // Assert no overflow into live objects.
    ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
  }

  // Pops the oldest (object, map) pair, in the same order insert stored it.
  void remove(HeapObject** object, Map** map) {
    *object = *(--front_);
    *map = Map::cast(*(--front_));
    // Assert no underflow.
    ASSERT(front_ >= rear_);
  }

 private:
  // The front of the queue is higher in memory than the rear.
  HeapObject** front_;
  HeapObject** rear_;
};
573
574
// Shared state read by the scavenge collector and set by ScavengeObject.
// File-static queue used by Heap::Scavenge while promoting objects.
static PromotionQueue promotion_queue;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000577
578
579#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object**end) {
    // Asserts that no HeapObject pointer in [start, end) targets new space.
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


// Walks all objects in code space and old data space, asserting that none
// of them reference new space (debug slow-assert helper for Scavenge).
static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(Heap::code_space());
  while (code_it.has_next()) {
    HeapObject* object = code_it.next();
    if (object->IsCode()) {
      // IC targets are stored as addresses; convert to object pointers so
      // the visitor sees them, then convert back.
      Code::cast(object)->ConvertICTargetsFromAddressToObject();
      object->Iterate(&v);
      Code::cast(object)->ConvertICTargetsFromObjectToAddress();
    } else {
      // If we find non-code objects in code space (e.g., free list
      // nodes) we want to verify them as well.
      object->Iterate(&v);
    }
  }

  HeapObjectIterator data_it(Heap::old_data_space());
  while (data_it.has_next()) data_it.next()->Iterate(&v);
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000615#endif
616
// Collects the new space using Cheney's copying algorithm.  Live objects
// are either copied within the new space (semispace flip) or promoted to
// the old generation; both kinds are then re-scanned for further new-space
// pointers until a fixed point is reached.
void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  scavenge_count_++;
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      scavenge_count_ > new_space_growth_limit_) {
    // Double the size of the new space, and double the limit.  The next
    // doubling attempt will occur after the current new_space_growth_limit_
    // more collections.
    // TODO(1240712): NewSpace::Double has a return value which is
    // ignored here.
    new_space_.Double();
    new_space_growth_limit_ *= 2;
  }

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue.Initialize(new_space_.ToSpaceHigh());

  ScavengeVisitor scavenge_visitor;
  // Copy roots.
  IterateRoots(&scavenge_visitor);

  // Copy objects reachable from weak pointers.
  GlobalHandles::IterateWeakRoots(&scavenge_visitor);

#if V8_HOST_ARCH_64_BIT
  // TODO(X64): Make this go away again. We currently disable RSets for
  // 64-bit-mode.
  // Without remembered sets, every old-generation object that may hold
  // pointers must be scanned in full.
  HeapObjectIterator old_pointer_iterator(old_pointer_space_);
  while (old_pointer_iterator.has_next()) {
    HeapObject* heap_object = old_pointer_iterator.next();
    heap_object->Iterate(&scavenge_visitor);
  }
  HeapObjectIterator map_iterator(map_space_);
  while (map_iterator.has_next()) {
    HeapObject* heap_object = map_iterator.next();
    heap_object->Iterate(&scavenge_visitor);
  }
  LargeObjectIterator lo_iterator(lo_space_);
  while (lo_iterator.has_next()) {
    HeapObject* heap_object = lo_iterator.next();
    if (heap_object->IsFixedArray()) {
      heap_object->Iterate(&scavenge_visitor);
    }
  }
#else  // V8_HOST_ARCH_64_BIT
  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateRSet(old_pointer_space_, &ScavengePointer);
  IterateRSet(map_space_, &ScavengePointer);
  lo_space_->IterateRSet(&ScavengePointer);
#endif  // V8_HOST_ARCH_64_BIT

  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      object->Iterate(&scavenge_visitor);
      new_space_front += object->Size();
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue.is_empty()) {
      HeapObject* source;
      Map* map;
      promotion_queue.remove(&source, &map);
      // Copy the from-space object to its new location (given by the
      // forwarding address) and fix its map.
      HeapObject* target = source->map_word().ToForwardingAddress();
      CopyBlock(reinterpret_cast<Object**>(target->address()),
                reinterpret_cast<Object**>(source->address()),
                source->SizeFromMap(map));
      target->set_map(map);

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
      // Update NewSpace stats if necessary.
      RecordCopiedObject(target);
#endif
      // Visit the newly copied object for pointers to new space.
      target->Iterate(&scavenge_visitor);
      UpdateRSet(target);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}
744
745
// Clears the remembered-set bits covering the address range
// [start, start + size_in_bytes).  Used to wipe RSet information for a
// region in one pass instead of clearing bit-by-bit.
// NOTE(review): the mask arithmetic below assumes the uint32_t outputs of
// Page::ComputeRSetBitPosition are single-bit masks (a power of two), not
// bit indices — `start_bit - 1` only yields "all lower bits" for a
// power of two.  Confirm against Page::ComputeRSetBitPosition.
void Heap::ClearRSetRange(Address start, int size_in_bytes) {
  uint32_t start_bit;
  Address start_word_address =
      Page::ComputeRSetBitPosition(start, 0, &start_bit);
  uint32_t end_bit;
  // The last slot in the range starts kIntSize before the end address.
  Address end_word_address =
      Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
                                   0,
                                   &end_bit);

  // We want to clear the bits in the starting word starting with the
  // first bit, and in the ending word up to and including the last
  // bit.  Build a pair of bitmasks to do that.
  uint32_t start_bitmask = start_bit - 1;       // keeps bits below start_bit
  uint32_t end_bitmask = ~((end_bit << 1) - 1);  // keeps bits above end_bit

  // If the start address and end address are the same, we mask that
  // word once, otherwise mask the starting and ending word
  // separately and all the ones in between.
  if (start_word_address == end_word_address) {
    Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
  } else {
    Memory::uint32_at(start_word_address) &= start_bitmask;
    Memory::uint32_at(end_word_address) &= end_bitmask;
    start_word_address += kIntSize;
    // Zero every whole word strictly between the two partial words.
    memset(start_word_address, 0, end_word_address - start_word_address);
  }
}
774
775
776class UpdateRSetVisitor: public ObjectVisitor {
777 public:
778
779 void VisitPointer(Object** p) {
780 UpdateRSet(p);
781 }
782
783 void VisitPointers(Object** start, Object** end) {
784 // Update a store into slots [start, end), used (a) to update remembered
785 // set when promoting a young object to old space or (b) to rebuild
786 // remembered sets after a mark-compact collection.
787 for (Object** p = start; p < end; p++) UpdateRSet(p);
788 }
789 private:
790
791 void UpdateRSet(Object** p) {
792 // The remembered set should not be set. It should be clear for objects
793 // newly copied to old space, and it is cleared before rebuilding in the
794 // mark-compact collector.
795 ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
796 if (Heap::InNewSpace(*p)) {
797 Page::SetRSet(reinterpret_cast<Address>(p), 0);
798 }
799 }
800};
801
802
// Sets remembered-set bits for all new-space pointers inside |obj| and
// returns the object's size in bytes.  On 64-bit hosts the RSet update
// is compiled out (size is still returned).
int Heap::UpdateRSet(HeapObject* obj) {
#ifndef V8_HOST_ARCH_64_BIT
  // TODO(X64) Reenable RSet when we have a working 64-bit layout of Page.
  ASSERT(!InNewSpace(obj));
  // Special handling of fixed arrays to iterate the body based on the start
  // address and offset.  Just iterating the pointers as in UpdateRSetVisitor
  // will not work because Page::SetRSet needs to have the start of the
  // object.
  if (obj->IsFixedArray()) {
    FixedArray* array = FixedArray::cast(obj);
    int length = array->length();
    for (int i = 0; i < length; i++) {
      // Byte offset of element i from the object's start address.
      int offset = FixedArray::kHeaderSize + i * kPointerSize;
      ASSERT(!Page::IsRSetSet(obj->address(), offset));
      if (Heap::InNewSpace(array->get(i))) {
        Page::SetRSet(obj->address(), offset);
      }
    }
  } else if (!obj->IsCode()) {
    // Skip code object, we know it does not contain inter-generational
    // pointers.
    UpdateRSetVisitor v;
    obj->Iterate(&v);
  }
#endif  // V8_HOST_ARCH_64_BIT
  return obj->Size();
}
830
831
// Clears and then recomputes the remembered sets of all spaces that can
// hold inter-generational pointers.
void Heap::RebuildRSets() {
  // By definition, we do not care about remembered set bits in code or data
  // spaces.
  map_space_->ClearRSet();
  RebuildRSets(map_space_);

  old_pointer_space_->ClearRSet();
  RebuildRSets(old_pointer_space_);

  Heap::lo_space_->ClearRSet();
  RebuildRSets(lo_space_);
}
844
845
846void Heap::RebuildRSets(PagedSpace* space) {
847 HeapObjectIterator it(space);
848 while (it.has_next()) Heap::UpdateRSet(it.next());
849}
850
851
852void Heap::RebuildRSets(LargeObjectSpace* space) {
853 LargeObjectIterator it(space);
854 while (it.has_next()) Heap::UpdateRSet(it.next());
855}
856
857
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Records new-space statistics for an object just copied by the
// scavenger, when heap stats (DEBUG) or GC logging is enabled.
void Heap::RecordCopiedObject(HeapObject* obj) {
  bool should_record = false;
#ifdef DEBUG
  should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  should_record = should_record || FLAG_log_gc;
#endif
  if (should_record) {
    // Objects still inside new space were copied within it; objects that
    // left new space were promoted to old space.
    if (new_space_.Contains(obj)) {
      new_space_.RecordAllocation(obj);
    } else {
      new_space_.RecordPromotion(obj);
    }
  }
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
876
877
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000878
// Copies |size| bytes of |source| to |target|, installs a forwarding
// address in |source|'s map word, and returns |target|.  The copy must
// happen before the map word is overwritten, since the map word is part
// of the copied content.
HeapObject* Heap::MigrateObject(HeapObject* source,
                                HeapObject* target,
                                int size) {
  // Copy the content of source to target.
  CopyBlock(reinterpret_cast<Object**>(target->address()),
            reinterpret_cast<Object**>(source->address()),
            size);

  // Set the forwarding address.
  source->set_map_word(MapWord::FromForwardingAddress(target));

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Update NewSpace stats if necessary.
  RecordCopiedObject(target);
#endif

  return target;
}
897
898
// Returns true if |object| is a non-symbol cons string whose second
// component is the empty string, i.e. an already-flattened ConsString
// that the scavenger can shortcut to its first component.
static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
  STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0);
  ASSERT(object->map() == map);
  InstanceType type = map->instance_type();
  // The shortcut tag bits single out exactly the cons-string,
  // non-symbol instance types.
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return false;
  ASSERT(object->IsString() && !object->IsSymbol());
  return ConsString::cast(object)->unchecked_second() == Heap::empty_string();
}
907
908
// Slow path of the scavenger: copies or promotes a live new-space
// |object| and updates the slot |*p| to point at the new location.
// Precondition: |object| is in from-space and has no forwarding address
// yet (the fast path handles already-forwarded objects).
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  ASSERT(InFromSpace(object));
  MapWord first_word = object->map_word();
  ASSERT(!first_word.IsForwardingAddress());

  // Optimization: Bypass flattened ConsString objects.
  if (IsShortcutCandidate(object, first_word.ToMap())) {
    object = HeapObject::cast(ConsString::cast(object)->unchecked_first());
    *p = object;
    // After patching *p we have to repeat the checks that object is in the
    // active semispace of the young generation and not already copied.
    if (!InNewSpace(object)) return;
    first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      *p = first_word.ToForwardingAddress();
      return;
    }
  }

  int object_size = object->SizeFromMap(first_word.ToMap());
  // We rely on live objects in new space to be at least two pointers,
  // so we can store the from-space address and map pointer of promoted
  // objects in the to space.
  ASSERT(object_size >= 2 * kPointerSize);

  // If the object should be promoted, we try to copy it to old space.
  if (ShouldBePromoted(object->address(), object_size)) {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space_ ||
           target_space == Heap::old_data_space_);
    Object* result = target_space->AllocateRaw(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      if (target_space == Heap::old_pointer_space_) {
        // Pointer-space objects are not copied yet: they may contain
        // new-space pointers that must be visited and recorded later.
        // Save the from-space object pointer and its map pointer at the
        // top of the to space to be swept and copied later.  Write the
        // forwarding address over the map word of the from-space
        // object.
        promotion_queue.insert(object, first_word.ToMap());
        object->set_map_word(MapWord::FromForwardingAddress(target));

        // Give the space allocated for the result a proper map by
        // treating it as a free list node (not linked into the free
        // list).
        FreeListNode* node = FreeListNode::FromAddress(target->address());
        node->set_size(object_size);

        *p = target;
      } else {
        // Objects promoted to the data space can be copied immediately
        // and not revisited---we will never sweep that space for
        // pointers and the copied objects do not contain pointers to
        // new space objects.
        *p = MigrateObject(object, target, object_size);
#ifdef DEBUG
        VerifyNonPointerSpacePointersVisitor v;
        (*p)->Iterate(&v);
#endif
      }
      return;
    }
  }

  // The object should remain in new space or the old space allocation failed.
  Object* result = new_space_.AllocateRaw(object_size);
  // Failed allocation at this point is utterly unexpected.
  ASSERT(!result->IsFailure());
  *p = MigrateObject(object, HeapObject::cast(result), object_size);
}
978
979
// Remembered-set callback: scavenges the object the slot |*p| refers to.
void Heap::ScavengePointer(HeapObject** p) {
  ScavengeObject(p, *p);
}
983
984
// Allocates a map with only the fields needed during bootstrapping set;
// instance descriptors, code cache, prototype and constructor are filled
// in later by CreateInitialMaps.  Returns a Failure on allocation failure.
Object* Heap::AllocatePartialMap(InstanceType instance_type,
                                 int instance_size) {
  Object* result = AllocateRawMap(Map::kSize);
  if (result->IsFailure()) return result;

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  return result;
}
998
999
// Allocates a fully initialized map for the given instance type and
// size.  Requires null_value, empty_descriptor_array and
// empty_fixed_array to exist already.  Returns a Failure on allocation
// failure.
Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
  Object* result = AllocateRawMap(Map::kSize);
  if (result->IsFailure()) return result;

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map(meta_map());
  map->set_instance_type(instance_type);
  map->set_prototype(null_value());
  map->set_constructor(null_value());
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_instance_descriptors(empty_descriptor_array());
  map->set_code_cache(empty_fixed_array());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  map->set_bit_field2(0);
  return map;
}
1018
1019
// Bootstraps the map hierarchy.  The meta map, fixed array map and
// oddball map must be created partially first (they are needed to
// allocate the empty arrays and null value), then back-patched once the
// empty descriptor array and null value exist.  Order matters
// throughout.  Returns false on any allocation failure.
bool Heap::CreateInitialMaps() {
  Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
  if (obj->IsFailure()) return false;

  // Map::cast cannot be used due to uninitialized map field.
  meta_map_ = reinterpret_cast<Map*>(obj);
  meta_map()->set_map(meta_map());  // The meta map is its own map.

  obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
  if (obj->IsFailure()) return false;
  fixed_array_map_ = Map::cast(obj);

  obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
  if (obj->IsFailure()) return false;
  oddball_map_ = Map::cast(obj);

  // Allocate the empty array
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  empty_fixed_array_ = FixedArray::cast(obj);

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  null_value_ = obj;

  // Allocate the empty descriptor array.  AllocateMap can now be used.
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  // There is a check against empty_descriptor_array() in cast().
  empty_descriptor_array_ = reinterpret_cast<DescriptorArray*>(obj);

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_instance_descriptors(empty_descriptor_array());
  meta_map()->set_code_cache(empty_fixed_array());

  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
  fixed_array_map()->set_code_cache(empty_fixed_array());

  oddball_map()->set_instance_descriptors(empty_descriptor_array());
  oddball_map()->set_code_cache(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());
  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
  if (obj->IsFailure()) return false;
  heap_number_map_ = Map::cast(obj);

  obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
  if (obj->IsFailure()) return false;
  proxy_map_ = Map::cast(obj);

// Allocate one map per string instance type in STRING_TYPE_LIST.
#define ALLOCATE_STRING_MAP(type, size, name) \
  obj = AllocateMap(type, size);              \
  if (obj->IsFailure()) return false;         \
  name##_map_ = Map::cast(obj);
  STRING_TYPE_LIST(ALLOCATE_STRING_MAP);
#undef ALLOCATE_STRING_MAP

  // Undetectable string maps: one per string length class and encoding.
  obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize);
  if (obj->IsFailure()) return false;
  undetectable_short_string_map_ = Map::cast(obj);
  undetectable_short_string_map_->set_is_undetectable();

  obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize);
  if (obj->IsFailure()) return false;
  undetectable_medium_string_map_ = Map::cast(obj);
  undetectable_medium_string_map_->set_is_undetectable();

  obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize);
  if (obj->IsFailure()) return false;
  undetectable_long_string_map_ = Map::cast(obj);
  undetectable_long_string_map_->set_is_undetectable();

  obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
  if (obj->IsFailure()) return false;
  undetectable_short_ascii_string_map_ = Map::cast(obj);
  undetectable_short_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
  if (obj->IsFailure()) return false;
  undetectable_medium_ascii_string_map_ = Map::cast(obj);
  undetectable_medium_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
  if (obj->IsFailure()) return false;
  undetectable_long_ascii_string_map_ = Map::cast(obj);
  undetectable_long_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kAlignedSize);
  if (obj->IsFailure()) return false;
  byte_array_map_ = Map::cast(obj);

  obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
  if (obj->IsFailure()) return false;
  code_map_ = Map::cast(obj);

  // Filler maps for one-word and two-word holes in the heap.
  obj = AllocateMap(FILLER_TYPE, kPointerSize);
  if (obj->IsFailure()) return false;
  one_word_filler_map_ = Map::cast(obj);

  obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
  if (obj->IsFailure()) return false;
  two_word_filler_map_ = Map::cast(obj);

// Allocate one map per struct type in STRUCT_LIST.
#define ALLOCATE_STRUCT_MAP(NAME, Name, name)  \
  obj = AllocateMap(NAME##_TYPE, Name::kSize); \
  if (obj->IsFailure()) return false;          \
  name##_map_ = Map::cast(obj);
  STRUCT_LIST(ALLOCATE_STRUCT_MAP)
#undef ALLOCATE_STRUCT_MAP

  // The following maps share FIXED_ARRAY_TYPE but get distinct map
  // objects so the kinds can be told apart by map identity.
  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  hash_table_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  context_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  catch_context_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  global_context_map_ = Map::cast(obj);

  obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
  if (obj->IsFailure()) return false;
  boilerplate_function_map_ = Map::cast(obj);

  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
  if (obj->IsFailure()) return false;
  shared_function_info_map_ = Map::cast(obj);

  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
  return true;
}
1165
1166
// Allocates a HeapNumber holding |value|, in old data space when
// TENURED, otherwise in new space (with old data space as the retry
// space).  Returns a Failure on allocation failure.
Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}
1179
1180
// Fast-path HeapNumber allocation straight out of new space.  Falls
// back to the general (pretenuring-aware) version when allocation is
// forced to succeed.  Must not be called during GC.
Object* Heap::AllocateHeapNumber(double value) {
  // Use general version, if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, NOT_TENURED);
  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}
1194
1195
// Allocates an oddball (true, false, the hole, ...) in old data space
// and initializes its print string and number value.  Returns a Failure
// on allocation failure.
Object* Heap::CreateOddball(Map* map,
                            const char* to_string,
                            Object* to_number) {
  Object* result = Allocate(map, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;
  return Oddball::cast(result)->Initialize(to_string, to_number);
}
1203
1204
// Creates heap objects used by the API layer: the "neander" map (a
// plain JSObject map) and the message-listeners list, a JSObject whose
// elements array holds a count (slot 0, initially 0) followed by the
// listener entries.  Returns false on any allocation failure.
bool Heap::CreateApiObjects() {
  Object* obj;

  obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  neander_map_ = Map::cast(obj);

  obj = Heap::AllocateJSObjectFromMap(neander_map_);
  if (obj->IsFailure()) return false;
  Object* elements = AllocateFixedArray(2);
  if (elements->IsFailure()) return false;
  // Slot 0 holds the number of registered listeners, starting at zero.
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  message_listeners_ = JSObject::cast(obj);

  return true;
}
1222
void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs.  They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope;
  {
    CEntryStub stub;
    c_entry_code_ = *stub.GetCode();
  }
  {
    CEntryDebugBreakStub stub;
    c_entry_debug_break_code_ = *stub.GetCode();
  }
  {
    JSEntryStub stub;
    js_entry_code_ = *stub.GetCode();
  }
  {
    JSConstructEntryStub stub;
    js_construct_entry_code_ = *stub.GetCode();
  }
}
1246
1247
// Bootstraps the non-map heap roots: canonical numbers, oddballs, the
// symbol table, well-known symbols, caches, and fixed stubs.  Order is
// significant (e.g. -0 before NumberFromDouble is usable, the symbol
// table before symbol lookups).  Returns false on any allocation
// failure.
bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  obj = AllocateHeapNumber(-0.0, TENURED);
  if (obj->IsFailure()) return false;
  minus_zero_value_ = obj;
  ASSERT(signbit(minus_zero_value_->Number()) != 0);

  obj = AllocateHeapNumber(OS::nan_value(), TENURED);
  if (obj->IsFailure()) return false;
  nan_value_ = obj;

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  undefined_value_ = obj;
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate initial symbol table.
  obj = SymbolTable::Allocate(kInitialSymbolTableSize);
  if (obj->IsFailure()) return false;
  symbol_table_ = obj;

  // Assign the print strings for oddballs after creating symboltable.
  Object* symbol = LookupAsciiSymbol("undefined");
  if (symbol->IsFailure()) return false;
  Oddball::cast(undefined_value_)->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value_)->set_to_number(nan_value_);

  // Assign the print strings for oddballs after creating symboltable.
  symbol = LookupAsciiSymbol("null");
  if (symbol->IsFailure()) return false;
  Oddball::cast(null_value_)->set_to_string(String::cast(symbol));
  Oddball::cast(null_value_)->set_to_number(Smi::FromInt(0));

  // Initialize the null oddball (it was allocated raw in
  // CreateInitialMaps).  NOTE(review): this repeats the to_string /
  // to_number assignments made just above — presumably harmless, but
  // confirm whether one of the two is redundant.
  obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
  if (obj->IsFailure()) return false;

  obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
  if (obj->IsFailure()) return false;
  true_value_ = obj;

  obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
  if (obj->IsFailure()) return false;
  false_value_ = obj;

  obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
  if (obj->IsFailure()) return false;
  the_hole_value_ = obj;

  // Allocate the empty string.
  obj = AllocateRawAsciiString(0, TENURED);
  if (obj->IsFailure()) return false;
  empty_string_ = String::cast(obj);

// Intern every symbol in SYMBOL_LIST and store it in its root slot.
#define SYMBOL_INITIALIZE(name, string) \
  obj = LookupAsciiSymbol(string);      \
  if (obj->IsFailure()) return false;   \
  (name##_) = String::cast(obj);
  SYMBOL_LIST(SYMBOL_INITIALIZE)
#undef SYMBOL_INITIALIZE

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects.  The hash code has a special value so that it will not match
  // the empty string when searching for the property.  It cannot be part of the
  // SYMBOL_LIST because it needs to be allocated manually with the special
  // hash code in place.  The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
  if (obj->IsFailure()) return false;
  hidden_symbol_ = String::cast(obj);

  // Allocate the proxy for __proto__.
  obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
  if (obj->IsFailure()) return false;
  prototype_accessors_ = Proxy::cast(obj);

  // Allocate the code_stubs dictionary.
  obj = Dictionary::Allocate(4);
  if (obj->IsFailure()) return false;
  code_stubs_ = Dictionary::cast(obj);

  // Allocate the non_monomorphic_cache used in stub-cache.cc
  obj = Dictionary::Allocate(4);
  if (obj->IsFailure()) return false;
  non_monomorphic_cache_ = Dictionary::cast(obj);

  CreateFixedStubs();

  // Allocate the number->string conversion cache (key/value pairs, so
  // twice the nominal size).
  obj = AllocateFixedArray(kNumberStringCacheSize * 2);
  if (obj->IsFailure()) return false;
  number_string_cache_ = FixedArray::cast(obj);

  // Allocate cache for single character strings.
  obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
  if (obj->IsFailure()) return false;
  single_character_string_cache_ = FixedArray::cast(obj);

  // Allocate cache for external strings pointing to native source code.
  obj = AllocateFixedArray(Natives::GetBuiltinsCount());
  if (obj->IsFailure()) return false;
  natives_source_cache_ = FixedArray::cast(obj);

  // Handling of script id generation is in Factory::NewScript.
  last_script_id_ = undefined_value();

  // Initialize keyed lookup cache.
  ClearKeyedLookupCache();

  // Initialize compilation cache.
  CompilationCache::Clear();

  return true;
}
1364
1365
1366static inline int double_get_hash(double d) {
1367 DoubleRepresentation rep(d);
1368 return ((static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) &
1369 (Heap::kNumberStringCacheSize - 1));
1370}
1371
1372
1373static inline int smi_get_hash(Smi* smi) {
1374 return (smi->value() & (Heap::kNumberStringCacheSize - 1));
1375}
1376
1377
1378
1379Object* Heap::GetNumberStringCache(Object* number) {
1380 int hash;
1381 if (number->IsSmi()) {
1382 hash = smi_get_hash(Smi::cast(number));
1383 } else {
1384 hash = double_get_hash(number->Number());
1385 }
1386 Object* key = number_string_cache_->get(hash * 2);
1387 if (key == number) {
1388 return String::cast(number_string_cache_->get(hash * 2 + 1));
1389 } else if (key->IsHeapNumber() &&
1390 number->IsHeapNumber() &&
1391 key->Number() == number->Number()) {
1392 return String::cast(number_string_cache_->get(hash * 2 + 1));
1393 }
1394 return undefined_value();
1395}
1396
1397
// Stores |string| as the cached representation of |number|.  Keys live
// at even slots, values at odd slots; an existing entry at the same
// hash is simply overwritten.
void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number));
    // Smis are immediate values, so no write barrier is needed here.
    number_string_cache_->set(hash * 2, number, SKIP_WRITE_BARRIER);
  } else {
    hash = double_get_hash(number->Number());
    number_string_cache_->set(hash * 2, number);
  }
  number_string_cache_->set(hash * 2 + 1, string);
}
1409
1410
// Converts a double to a tagged number object. Plus zero and values that
// round-trip through a valid Smi become Smis; minus zero and NaN either
// reuse the preallocated singletons or (when new_object is true) get a
// freshly allocated heap number; every other value is materialized as a
// new heap number in the space selected by pretenure.
Object* Heap::SmiOrNumberFromDouble(double value,
                                    bool new_object,
                                    PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int. Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
  static const DoubleRepresentation plus_zero(0.0);
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation nan(OS::nan_value());
  ASSERT(minus_zero_value_ != NULL);
  ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));

  DoubleRepresentation rep(value);
  if (rep.bits == plus_zero.bits) return Smi::FromInt(0);  // not uncommon
  if (rep.bits == minus_zero.bits) {
    return new_object ? AllocateHeapNumber(-0.0, pretenure)
                      : minus_zero_value_;
  }
  // NOTE: this bit comparison only matches the particular NaN bit pattern
  // produced by OS::nan_value(); other NaN encodings fall through and are
  // materialized as heap numbers below.
  if (rep.bits == nan.bits) {
    return new_object
        ? AllocateHeapNumber(OS::nan_value(), pretenure)
        : nan_value_;
  }

  // Try to represent the value as a tagged small integer.
  int int_value = FastD2I(value);
  if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}
1444
1445
1446Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
1447 return SmiOrNumberFromDouble(value,
1448 true /* number object must be new */,
1449 pretenure);
1450}
1451
1452
1453Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
1454 return SmiOrNumberFromDouble(value,
1455 false /* use preallocated NaN, -0.0 */,
1456 pretenure);
1457}
1458
1459
// Allocates a Proxy object wrapping the given raw address. TENURED
// requests go to old data space, others to new space. Returns a Failure
// object if allocation fails.
Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate proxies in paged spaces.
  STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result = Allocate(proxy_map(), space);
  if (result->IsFailure()) return result;

  Proxy::cast(result)->set_proxy(proxy);
  return result;
}
1471
1472
// Allocates a SharedFunctionInfo with the given name and every other field
// set to a neutral default: the Illegal builtin as code, zero counts and
// positions, undefined script/function data/debug info, and an empty
// inferred name. Returns a Failure object if allocation fails.
Object* Heap::AllocateSharedFunctionInfo(Object* name) {
  Object* result = Allocate(shared_function_info_map(), NEW_SPACE);
  if (result->IsFailure()) return result;

  SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
  share->set_name(name);
  // Until real code is installed, calling the function hits the Illegal
  // builtin.
  Code* illegal = Builtins::builtin(Builtins::Illegal);
  share->set_code(illegal);
  share->set_expected_nof_properties(0);
  share->set_length(0);
  share->set_formal_parameter_count(0);
  share->set_instance_class_name(Object_symbol());
  share->set_function_data(undefined_value());
  share->set_script(undefined_value());
  share->set_start_position_and_type(0);
  share->set_debug_info(undefined_value());
  share->set_inferred_name(empty_string());
  return result;
}
1492
1493
// Allocates a string representing first + second. Short results are
// eagerly flattened into a fresh sequential (ASCII or two-byte) string;
// longer results become a ConsString pointing at both halves, with a map
// chosen by length class (short/medium/long) and ASCII-ness.
Object* Heap::AllocateConsString(String* first,
                                 String* second) {
  int first_length = first->length();
  int second_length = second->length();
  int length = first_length + second_length;
  // The result can only use the ASCII representation if both inputs do.
  bool is_ascii = first->IsAsciiRepresentation()
      && second->IsAsciiRepresentation();

  // If the resulting string is small make a flat string.
  if (length < String::kMinNonFlatLength) {
    ASSERT(first->IsFlat());
    ASSERT(second->IsFlat());
    if (is_ascii) {
      Object* result = AllocateRawAsciiString(length);
      if (result->IsFailure()) return result;
      // Copy the characters into the new object.
      char* dest = SeqAsciiString::cast(result)->GetChars();
      String::WriteToFlat(first, dest, 0, first_length);
      String::WriteToFlat(second, dest + first_length, 0, second_length);
      return result;
    } else {
      Object* result = AllocateRawTwoByteString(length);
      if (result->IsFailure()) return result;
      // Copy the characters into the new object.
      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
      String::WriteToFlat(first, dest, 0, first_length);
      String::WriteToFlat(second, dest + first_length, 0, second_length);
      return result;
    }
  }

  Map* map;
  if (length <= String::kMaxShortStringSize) {
    map = is_ascii ? short_cons_ascii_string_map()
                   : short_cons_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = is_ascii ? medium_cons_ascii_string_map()
                   : medium_cons_string_map();
  } else {
    map = is_ascii ? long_cons_ascii_string_map()
                   : long_cons_string_map();
  }

  Object* result = Allocate(map, NEW_SPACE);
  if (result->IsFailure()) return result;
  // The cons cell was just allocated in new space, so storing pointers into
  // it cannot create old-to-new references; the write barrier is skipped.
  ASSERT(InNewSpace(result));
  ConsString* cons_string = ConsString::cast(result);
  cons_string->set_first(first, SKIP_WRITE_BARRIER);
  cons_string->set_second(second, SKIP_WRITE_BARRIER);
  cons_string->set_length(length);
  return result;
}
1546
1547
// Allocates a string representing buffer[start..end). Very short slices
// are copied out via AllocateSubString; longer ones share the backing
// buffer through a SlicedString whose map encodes the length class and
// the buffer's ASCII-ness.
Object* Heap::AllocateSlicedString(String* buffer,
                                   int start,
                                   int end) {
  int length = end - start;

  // If the resulting string is small make a sub string.
  if (end - start <= String::kMinNonFlatLength) {
    return Heap::AllocateSubString(buffer, start, end);
  }

  Map* map;
  if (length <= String::kMaxShortStringSize) {
    map = buffer->IsAsciiRepresentation() ?
        short_sliced_ascii_string_map() :
        short_sliced_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = buffer->IsAsciiRepresentation() ?
        medium_sliced_ascii_string_map() :
        medium_sliced_string_map();
  } else {
    map = buffer->IsAsciiRepresentation() ?
        long_sliced_ascii_string_map() :
        long_sliced_string_map();
  }

  Object* result = Allocate(map, NEW_SPACE);
  if (result->IsFailure()) return result;

  SlicedString* sliced_string = SlicedString::cast(result);
  sliced_string->set_buffer(buffer);
  sliced_string->set_start(start);
  sliced_string->set_length(length);

  return result;
}
1583
1584
// Copies buffer[start..end) into a fresh sequential string, computing the
// string's hash field on the fly during the copy. Single-character
// substrings are served from the single-character string cache.
Object* Heap::AllocateSubString(String* buffer,
                                int start,
                                int end) {
  int length = end - start;

  if (length == 1) {
    return Heap::LookupSingleCharacterStringFromCode(
        buffer->Get(start));
  }

  // Make an attempt to flatten the buffer to reduce access time.
  if (!buffer->IsFlat()) {
    buffer->TryFlatten();
  }

  Object* result = buffer->IsAsciiRepresentation()
      ? AllocateRawAsciiString(length)
      : AllocateRawTwoByteString(length);
  if (result->IsFailure()) return result;

  // Copy the characters into the new object.
  String* string_result = String::cast(result);
  StringHasher hasher(length);
  int i = 0;
  // First pass: hash with array-index tracking until the hasher rules out
  // the possibility that the string is an array index.
  for (; i < length && hasher.is_array_index(); i++) {
    uc32 c = buffer->Get(start + i);
    hasher.AddCharacter(c);
    string_result->Set(i, c);
  }
  // Second pass: plain hashing for the remaining characters.
  for (; i < length; i++) {
    uc32 c = buffer->Get(start + i);
    hasher.AddCharacterNoIndex(c);
    string_result->Set(i, c);
  }
  string_result->set_length_field(hasher.GetHashField());
  return result;
}
1622
1623
1624Object* Heap::AllocateExternalStringFromAscii(
1625 ExternalAsciiString::Resource* resource) {
1626 Map* map;
1627 int length = resource->length();
1628 if (length <= String::kMaxShortStringSize) {
1629 map = short_external_ascii_string_map();
1630 } else if (length <= String::kMaxMediumStringSize) {
1631 map = medium_external_ascii_string_map();
1632 } else {
1633 map = long_external_ascii_string_map();
1634 }
1635
1636 Object* result = Allocate(map, NEW_SPACE);
1637 if (result->IsFailure()) return result;
1638
1639 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
1640 external_string->set_length(length);
1641 external_string->set_resource(resource);
1642
1643 return result;
1644}
1645
1646
1647Object* Heap::AllocateExternalStringFromTwoByte(
1648 ExternalTwoByteString::Resource* resource) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001649 int length = resource->length();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001650
ager@chromium.org6f10e412009-02-13 10:11:16 +00001651 Map* map = ExternalTwoByteString::StringMap(length);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001652 Object* result = Allocate(map, NEW_SPACE);
1653 if (result->IsFailure()) return result;
1654
1655 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
1656 external_string->set_length(length);
1657 external_string->set_resource(resource);
1658
1659 return result;
1660}
1661
1662
// Returns a length-one string for the given character code. ASCII codes
// are interned as symbols and cached in the single character string
// cache; non-ASCII codes always allocate a fresh two-byte string.
Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
  if (code <= String::kMaxAsciiCharCode) {
    Object* value = Heap::single_character_string_cache()->get(code);
    if (value != Heap::undefined_value()) return value;  // cache hit

    char buffer[1];
    buffer[0] = static_cast<char>(code);
    Object* result = LookupSymbol(Vector<const char>(buffer, 1));

    if (result->IsFailure()) return result;
    Heap::single_character_string_cache()->set(code, result);
    return result;
  }

  Object* result = Heap::AllocateRawTwoByteString(1);
  if (result->IsFailure()) return result;
  String* answer = String::cast(result);
  answer->Set(0, code);
  return answer;
}
1683
1684
// Allocates a byte array of the given length. NOT_TENURED requests are
// delegated to the new-space overload; tenured arrays go to old data
// space, or large object space when too big for a regular page.
Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
  if (pretenure == NOT_TENURED) {
    return AllocateByteArray(length);
  }
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      size > MaxHeapObjectSize() ? LO_SPACE : OLD_DATA_SPACE;

  // Byte arrays contain no pointers; OLD_DATA_SPACE is passed as the
  // target space for the allocation.
  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);

  if (result->IsFailure()) return result;

  reinterpret_cast<Array*>(result)->set_map(byte_array_map());
  reinterpret_cast<Array*>(result)->set_length(length);
  return result;
}
1701
1702
// Allocates a byte array of the given length in new space, or in large
// object space when it does not fit on a regular page.
Object* Heap::AllocateByteArray(int length) {
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;

  // Byte arrays contain no pointers; OLD_DATA_SPACE is passed as the
  // target space for the allocation.
  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);

  if (result->IsFailure()) return result;

  reinterpret_cast<Array*>(result)->set_map(byte_array_map());
  reinterpret_cast<Array*>(result)->set_length(length);
  return result;
}
1716
1717
ager@chromium.org6f10e412009-02-13 10:11:16 +00001718void Heap::CreateFillerObjectAt(Address addr, int size) {
1719 if (size == 0) return;
1720 HeapObject* filler = HeapObject::FromAddress(addr);
1721 if (size == kPointerSize) {
1722 filler->set_map(Heap::one_word_filler_map());
1723 } else {
1724 filler->set_map(Heap::byte_array_map());
1725 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
1726 }
1727}
1728
1729
// Allocates and initializes a Code object from the assembler's CodeDesc,
// optionally serializing scope info into the object's tail. When
// self_reference is non-null, the handle is patched to point at the new
// Code object *before* the instructions are copied, so generated code can
// legally refer to itself. Returns a Failure object if allocation fails.
Object* Heap::CreateCode(const CodeDesc& desc,
                         ZoneScopeInfo* sinfo,
                         Code::Flags flags,
                         Handle<Object> self_reference) {
  // Compute size
  int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment);
  int sinfo_size = 0;
  if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);  // size-only pass
  int obj_size = Code::SizeFor(body_size, sinfo_size);
  ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
  Object* result;
  // Code always lives in code space, or in large object space when it
  // does not fit on a regular page.
  if (obj_size > MaxHeapObjectSize()) {
    result = lo_space_->AllocateRawCode(obj_size);
  } else {
    result = code_space_->AllocateRaw(obj_size);
  }

  if (result->IsFailure()) return result;

  // Initialize the object
  HeapObject::cast(result)->set_map(code_map());
  Code* code = Code::cast(result);
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_size(desc.reloc_size);
  code->set_sinfo_size(sinfo_size);
  code->set_flags(flags);
  code->set_ic_flag(Code::IC_TARGET_IS_ADDRESS);
  // Allow self references to created code object by patching the handle to
  // point to the newly allocated Code object.
  if (!self_reference.is_null()) {
    *(self_reference.location()) = code;
  }
  // Migrate generated code.
  // The generated code can contain Object** values (typically from handles)
  // that are dereferenced during the copy to point directly to the actual heap
  // objects. These pointers can include references to the code object itself,
  // through the self_reference parameter.
  code->CopyFrom(desc);
  if (sinfo != NULL) sinfo->Serialize(code);  // write scope info
  LOG(CodeAllocateEvent(code, desc.origin));

#ifdef DEBUG
  code->Verify();
#endif
  return code;
}
1776
1777
// Makes a byte-wise copy of a Code object and then relocates the copy so
// that its position-dependent contents are valid at the new address.
// Returns a Failure object if allocation fails.
Object* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  Object* result;
  if (obj_size > MaxHeapObjectSize()) {
    result = lo_space_->AllocateRawCode(obj_size);
  } else {
    result = code_space_->AllocateRaw(obj_size);
  }

  if (result->IsFailure()) return result;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  CopyBlock(reinterpret_cast<Object**>(new_addr),
            reinterpret_cast<Object**>(old_addr),
            obj_size);
  // Relocate the copy.
  Code* new_code = Code::cast(result);
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}
1801
1802
// Allocates an object with the given map in the requested space, passing
// the instance type's target space to AllocateRaw, and installs the map.
// Must not be called while a GC is in progress; maps themselves are
// allocated via AllocateMap. Returns a Failure object if allocation fails.
Object* Heap::Allocate(Map* map, AllocationSpace space) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
  Object* result = AllocateRaw(map->instance_size(),
                               space,
                               TargetSpaceId(map->instance_type()));
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(map);
  return result;
}
1813
1814
// Initializes the fields of a freshly allocated JSFunction: empty
// properties and elements, the given shared info and prototype (or
// initial map), an undefined context, and an empty literals array.
// Returns the function itself.
Object* Heap::InitializeFunction(JSFunction* function,
                                 SharedFunctionInfo* shared,
                                 Object* prototype) {
  ASSERT(!prototype->IsMap());
  function->initialize_properties();
  function->initialize_elements();
  function->set_shared(shared);
  function->set_prototype_or_initial_map(prototype);
  function->set_context(undefined_value());
  // NOTE(review): write barrier skipped here — presumably because the
  // empty fixed array is never in new space; confirm against root setup.
  function->set_literals(empty_fixed_array(), SKIP_WRITE_BARRIER);
  return function;
}
1827
1828
// Creates the default prototype object for the given function and sets
// its non-enumerable "constructor" property to the function. Returns a
// Failure object if any allocation or property store fails.
Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
  // Allocate the prototype.  Make sure to use the object function
  // from the function's context, since the function can be from a
  // different context.
  JSFunction* object_function =
      function->context()->global_context()->object_function();
  Object* prototype = AllocateJSObject(object_function);
  if (prototype->IsFailure()) return prototype;
  // When creating the prototype for the function we must set its
  // constructor to the function.
  Object* result =
      JSObject::cast(prototype)->SetProperty(constructor_symbol(),
                                             function,
                                             DONT_ENUM);
  if (result->IsFailure()) return result;
  return prototype;
}
1846
1847
1848Object* Heap::AllocateFunction(Map* function_map,
1849 SharedFunctionInfo* shared,
1850 Object* prototype) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001851 Object* result = Allocate(function_map, OLD_POINTER_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001852 if (result->IsFailure()) return result;
1853 return InitializeFunction(JSFunction::cast(result), shared, prototype);
1854}
1855
1856
// Allocates an arguments object by byte-copying the context's arguments
// boilerplate and then filling in the callee and length in-object
// properties. Returns a Failure object if allocation fails.
Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
  // To get fast allocation and map sharing for arguments objects we
  // allocate them based on an arguments boilerplate.

  // This calls Copy directly rather than using Heap::AllocateRaw so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  JSObject* boilerplate =
      Top::context()->global_context()->arguments_boilerplate();

  // Make the clone.
  Map* map = boilerplate->map();
  int object_size = map->instance_size();
  Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
  if (result->IsFailure()) return result;

  // Copy the content. The arguments boilerplate doesn't have any
  // fields that point to new space so it's safe to skip the write
  // barrier here.
  CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
            reinterpret_cast<Object**>(boilerplate->address()),
            object_size);

  // Set the two properties.
  JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
                                                callee);
  // The length is a Smi (immediate value), so no write barrier is needed.
  JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
                                                Smi::FromInt(length),
                                                SKIP_WRITE_BARRIER);

  // Check the state of the object
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastElements());

  return result;
}
1894
1895
// Computes and allocates the initial map for instances created by the
// given function. The instance size reserves one in-object slot per
// expected property, clamped to the maximum JSObject instance size.
// Returns a Failure object if allocation fails.
Object* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map with the expected number of properties being
  // allocated in-object.
  int expected_nof_properties = fun->shared()->expected_nof_properties();
  int instance_size = JSObject::kHeaderSize +
                      expected_nof_properties * kPointerSize;
  if (instance_size > JSObject::kMaxInstanceSize) {
    // Clamp the size and recompute how many properties still fit in-object.
    instance_size = JSObject::kMaxInstanceSize;
    expected_nof_properties = (instance_size - JSObject::kHeaderSize) /
                              kPointerSize;
  }
  Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
  if (map_obj->IsFailure()) return map_obj;

  // Fetch or allocate prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    prototype = AllocateFunctionPrototype(fun);
    if (prototype->IsFailure()) return prototype;
  }
  Map* map = Map::cast(map_obj);
  map->set_inobject_properties(expected_nof_properties);
  map->set_unused_property_fields(expected_nof_properties);
  map->set_prototype(prototype);
  return map;
}
1926
1927
// Initializes a freshly allocated JSObject: installs the given properties
// backing store, the empty elements array, and fills the object body up
// to the map's instance size.
void Heap::InitializeJSObjectFromMap(JSObject* obj,
                                     FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map. For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
  // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
  // verification code has to cope with (temporarily) invalid objects. See
  // for example, JSArray::JSArrayVerify).
  obj->InitializeBody(map->instance_size());
}
1942
1943
// Allocates a JSObject described by the given map. A fixed array backing
// store is allocated for the properties that do not fit in-object.
// TENURED objects go to old pointer space; oversized ones go to large
// object space. Returns a Failure object if any allocation fails.
Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties = AllocateFixedArray(prop_size);
  if (properties->IsFailure()) return properties;

  // Allocate the JSObject.
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE;
  Object* obj = Allocate(map, space);
  if (obj->IsFailure()) return obj;

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  return obj;
}
1967
1968
1969Object* Heap::AllocateJSObject(JSFunction* constructor,
1970 PretenureFlag pretenure) {
1971 // Allocate the initial map if absent.
1972 if (!constructor->has_initial_map()) {
1973 Object* initial_map = AllocateInitialMap(constructor);
1974 if (initial_map->IsFailure()) return initial_map;
1975 constructor->set_initial_map(Map::cast(initial_map));
1976 Map::cast(initial_map)->set_constructor(constructor);
1977 }
1978 // Allocate the object based on the constructors initial map.
1979 return AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
1980}
1981
1982
// Makes a shallow clone of a JSObject. The elements and properties
// backing stores are copied as well, so the clone does not share mutable
// storage with the source; the values inside them are not deep-copied.
// Must not be used on functions. Returns a Failure object on allocation
// failure.
Object* Heap::CopyJSObject(JSObject* source) {
  // Never used to copy functions. If functions need to be copied we
  // have to be careful to clear the literals array.
  ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  if (always_allocate()) {
    clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
    if (clone->IsFailure()) return clone;
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(reinterpret_cast<Object**>(clone_address),
              reinterpret_cast<Object**>(source->address()),
              object_size);
    // Update write barrier for all fields that lie beyond the header.
    for (int offset = JSObject::kHeaderSize;
         offset < object_size;
         offset += kPointerSize) {
      RecordWrite(clone_address, offset);
    }
  } else {
    clone = new_space_.AllocateRaw(object_size);
    if (clone->IsFailure()) return clone;
    ASSERT(Heap::InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
              reinterpret_cast<Object**>(source->address()),
              object_size);
  }

  FixedArray* elements = FixedArray::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem = CopyFixedArray(elements);
    if (elem->IsFailure()) return elem;
    JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop = CopyFixedArray(properties);
    if (prop->IsFailure()) return prop;
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
  }
  // Return the new clone.
  return clone;
}
2036
2037
// Re-initializes an already-allocated global proxy object in place as an
// instance of the given constructor: installs the constructor's initial
// map (creating it if necessary), a fresh properties backing store, and a
// reinitialized body. The existing object must have the same instance
// size as the constructor's instances.
Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                        JSGlobalProxy* object) {
  // Allocate initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map = AllocateInitialMap(constructor);
    if (initial_map->IsFailure()) return initial_map;
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }

  Map* map = constructor->initial_map();

  // Check that the already allocated object has the same size as
  // objects allocated using the constructor.
  ASSERT(map->instance_size() == object->map()->instance_size());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties = AllocateFixedArray(prop_size);
  if (properties->IsFailure()) return properties;

  // Reset the map for the object.
  object->set_map(constructor->initial_map());

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
  return object;
}
2066
2067
2068Object* Heap::AllocateStringFromAscii(Vector<const char> string,
2069 PretenureFlag pretenure) {
2070 Object* result = AllocateRawAsciiString(string.length(), pretenure);
2071 if (result->IsFailure()) return result;
2072
2073 // Copy the characters into the new object.
ager@chromium.org7c537e22008-10-16 08:43:32 +00002074 SeqAsciiString* string_result = SeqAsciiString::cast(result);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002075 for (int i = 0; i < string.length(); i++) {
ager@chromium.org7c537e22008-10-16 08:43:32 +00002076 string_result->SeqAsciiStringSet(i, string[i]);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002077 }
2078 return result;
2079}
2080
2081
// Allocates a string from UTF-8 encoded input. A first decoding pass
// counts the characters and detects pure-ASCII input (which is delegated
// to AllocateStringFromAscii unchanged, since UTF-8 is a superset of
// ASCII); otherwise a second decoding pass fills a two-byte string.
Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
                                     PretenureFlag pretenure) {
  // Count the number of characters in the UTF-8 string and check if
  // it is an ASCII string.
  Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
  decoder->Reset(string.start(), string.length());
  int chars = 0;
  bool is_ascii = true;
  while (decoder->has_more()) {
    uc32 r = decoder->GetNext();
    if (r > String::kMaxAsciiCharCode) is_ascii = false;
    chars++;
  }

  // If the string is ascii, we do not need to convert the characters
  // since UTF8 is backwards compatible with ascii.
  if (is_ascii) return AllocateStringFromAscii(string, pretenure);

  Object* result = AllocateRawTwoByteString(chars, pretenure);
  if (result->IsFailure()) return result;

  // Convert and copy the characters into the new object.
  String* string_result = String::cast(result);
  decoder->Reset(string.start(), string.length());
  for (int i = 0; i < chars; i++) {
    uc32 r = decoder->GetNext();
    string_result->Set(i, r);
  }
  return result;
}
2112
2113
2114Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
2115 PretenureFlag pretenure) {
2116 // Check if the string is an ASCII string.
2117 int i = 0;
2118 while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
2119
2120 Object* result;
2121 if (i == string.length()) { // It's an ASCII string.
2122 result = AllocateRawAsciiString(string.length(), pretenure);
2123 } else { // It's not an ASCII string.
2124 result = AllocateRawTwoByteString(string.length(), pretenure);
2125 }
2126 if (result->IsFailure()) return result;
2127
2128 // Copy the characters into the new object, which may be either ASCII or
2129 // UTF-16.
2130 String* string_result = String::cast(result);
2131 for (int i = 0; i < string.length(); i++) {
ager@chromium.orgbb29dc92009-03-24 13:25:23 +00002132 string_result->Set(i, string[i]);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002133 }
2134 return result;
2135}
2136
2137
2138Map* Heap::SymbolMapForString(String* string) {
2139 // If the string is in new space it cannot be used as a symbol.
2140 if (InNewSpace(string)) return NULL;
2141
2142 // Find the corresponding symbol map for strings.
2143 Map* map = string->map();
2144
2145 if (map == short_ascii_string_map()) return short_ascii_symbol_map();
2146 if (map == medium_ascii_string_map()) return medium_ascii_symbol_map();
2147 if (map == long_ascii_string_map()) return long_ascii_symbol_map();
2148
2149 if (map == short_string_map()) return short_symbol_map();
2150 if (map == medium_string_map()) return medium_symbol_map();
2151 if (map == long_string_map()) return long_symbol_map();
2152
2153 if (map == short_cons_string_map()) return short_cons_symbol_map();
2154 if (map == medium_cons_string_map()) return medium_cons_symbol_map();
2155 if (map == long_cons_string_map()) return long_cons_symbol_map();
2156
2157 if (map == short_cons_ascii_string_map()) {
2158 return short_cons_ascii_symbol_map();
2159 }
2160 if (map == medium_cons_ascii_string_map()) {
2161 return medium_cons_ascii_symbol_map();
2162 }
2163 if (map == long_cons_ascii_string_map()) {
2164 return long_cons_ascii_symbol_map();
2165 }
2166
2167 if (map == short_sliced_string_map()) return short_sliced_symbol_map();
kasperl@chromium.org9fe21c62008-10-28 08:53:51 +00002168 if (map == medium_sliced_string_map()) return medium_sliced_symbol_map();
2169 if (map == long_sliced_string_map()) return long_sliced_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002170
2171 if (map == short_sliced_ascii_string_map()) {
2172 return short_sliced_ascii_symbol_map();
2173 }
2174 if (map == medium_sliced_ascii_string_map()) {
kasperl@chromium.org9fe21c62008-10-28 08:53:51 +00002175 return medium_sliced_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002176 }
2177 if (map == long_sliced_ascii_string_map()) {
kasperl@chromium.org9fe21c62008-10-28 08:53:51 +00002178 return long_sliced_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002179 }
2180
ager@chromium.org6f10e412009-02-13 10:11:16 +00002181 if (map == short_external_string_map()) {
2182 return short_external_symbol_map();
2183 }
2184 if (map == medium_external_string_map()) {
2185 return medium_external_symbol_map();
2186 }
2187 if (map == long_external_string_map()) {
2188 return long_external_symbol_map();
2189 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002190
2191 if (map == short_external_ascii_string_map()) {
ager@chromium.org6f10e412009-02-13 10:11:16 +00002192 return short_external_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002193 }
2194 if (map == medium_external_ascii_string_map()) {
ager@chromium.org6f10e412009-02-13 10:11:16 +00002195 return medium_external_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002196 }
2197 if (map == long_external_ascii_string_map()) {
ager@chromium.org6f10e412009-02-13 10:11:16 +00002198 return long_external_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002199 }
2200
2201 // No match found.
2202 return NULL;
2203}
2204
2205
ager@chromium.orga74f0da2008-12-03 16:05:52 +00002206Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
2207 int chars,
2208 uint32_t length_field) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002209 // Ensure the chars matches the number of characters in the buffer.
2210 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
2211 // Determine whether the string is ascii.
2212 bool is_ascii = true;
ager@chromium.org6f10e412009-02-13 10:11:16 +00002213 while (buffer->has_more() && is_ascii) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002214 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) is_ascii = false;
2215 }
2216 buffer->Rewind();
2217
2218 // Compute map and object size.
2219 int size;
2220 Map* map;
2221
2222 if (is_ascii) {
2223 if (chars <= String::kMaxShortStringSize) {
2224 map = short_ascii_symbol_map();
2225 } else if (chars <= String::kMaxMediumStringSize) {
2226 map = medium_ascii_symbol_map();
2227 } else {
2228 map = long_ascii_symbol_map();
2229 }
ager@chromium.org7c537e22008-10-16 08:43:32 +00002230 size = SeqAsciiString::SizeFor(chars);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002231 } else {
2232 if (chars <= String::kMaxShortStringSize) {
2233 map = short_symbol_map();
2234 } else if (chars <= String::kMaxMediumStringSize) {
2235 map = medium_symbol_map();
2236 } else {
2237 map = long_symbol_map();
2238 }
ager@chromium.org7c537e22008-10-16 08:43:32 +00002239 size = SeqTwoByteString::SizeFor(chars);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002240 }
2241
2242 // Allocate string.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002243 AllocationSpace space =
2244 (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002245 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002246 if (result->IsFailure()) return result;
2247
2248 reinterpret_cast<HeapObject*>(result)->set_map(map);
2249 // The hash value contains the length of the string.
ager@chromium.org870a0b62008-11-04 11:43:05 +00002250 String* answer = String::cast(result);
ager@chromium.org870a0b62008-11-04 11:43:05 +00002251 answer->set_length_field(length_field);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002252
ager@chromium.org870a0b62008-11-04 11:43:05 +00002253 ASSERT_EQ(size, answer->Size());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002254
2255 // Fill in the characters.
2256 for (int i = 0; i < chars; i++) {
ager@chromium.orgbb29dc92009-03-24 13:25:23 +00002257 answer->Set(i, buffer->GetNext());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002258 }
ager@chromium.org870a0b62008-11-04 11:43:05 +00002259 return answer;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002260}
2261
2262
2263Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002264 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
ager@chromium.org7c537e22008-10-16 08:43:32 +00002265 int size = SeqAsciiString::SizeFor(length);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002266 if (size > MaxHeapObjectSize()) {
2267 space = LO_SPACE;
2268 }
2269
2270 // Use AllocateRaw rather than Allocate because the object's size cannot be
2271 // determined from the map.
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002272 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002273 if (result->IsFailure()) return result;
2274
2275 // Determine the map based on the string's length.
2276 Map* map;
2277 if (length <= String::kMaxShortStringSize) {
2278 map = short_ascii_string_map();
2279 } else if (length <= String::kMaxMediumStringSize) {
2280 map = medium_ascii_string_map();
2281 } else {
2282 map = long_ascii_string_map();
2283 }
2284
2285 // Partially initialize the object.
2286 HeapObject::cast(result)->set_map(map);
2287 String::cast(result)->set_length(length);
2288 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2289 return result;
2290}
2291
2292
2293Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002294 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
ager@chromium.org7c537e22008-10-16 08:43:32 +00002295 int size = SeqTwoByteString::SizeFor(length);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002296 if (size > MaxHeapObjectSize()) {
2297 space = LO_SPACE;
2298 }
2299
2300 // Use AllocateRaw rather than Allocate because the object's size cannot be
2301 // determined from the map.
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002302 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002303 if (result->IsFailure()) return result;
2304
2305 // Determine the map based on the string's length.
2306 Map* map;
2307 if (length <= String::kMaxShortStringSize) {
2308 map = short_string_map();
2309 } else if (length <= String::kMaxMediumStringSize) {
2310 map = medium_string_map();
2311 } else {
2312 map = long_string_map();
2313 }
2314
2315 // Partially initialize the object.
2316 HeapObject::cast(result)->set_map(map);
2317 String::cast(result)->set_length(length);
2318 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2319 return result;
2320}
2321
2322
2323Object* Heap::AllocateEmptyFixedArray() {
2324 int size = FixedArray::SizeFor(0);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002325 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002326 if (result->IsFailure()) return result;
2327 // Initialize the object.
2328 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2329 reinterpret_cast<Array*>(result)->set_length(0);
2330 return result;
2331}
2332
2333
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002334Object* Heap::AllocateRawFixedArray(int length) {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002335 // Use the general function if we're forced to always allocate.
2336 if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002337 // Allocate the raw data for a fixed array.
2338 int size = FixedArray::SizeFor(length);
2339 return (size > MaxHeapObjectSize())
2340 ? lo_space_->AllocateRawFixedArray(size)
2341 : new_space_.AllocateRaw(size);
2342}
2343
2344
// Makes a shallow copy of |src|. A copy that lands in new space is
// filled with a raw byte copy (map included); otherwise the header is
// installed explicitly and the elements are copied one by one with the
// write barrier mode appropriate for the target.
Object* Heap::CopyFixedArray(FixedArray* src) {
  int len = src->length();
  Object* obj = AllocateRawFixedArray(len);
  if (obj->IsFailure()) return obj;
  if (Heap::InNewSpace(obj)) {
    HeapObject* dst = HeapObject::cast(obj);
    // Fast path: copy the whole object, including the map word.
    CopyBlock(reinterpret_cast<Object**>(dst->address()),
              reinterpret_cast<Object**>(src->address()),
              FixedArray::SizeFor(len));
    return obj;
  }
  HeapObject::cast(obj)->set_map(src->map());
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);
  // Copy the content
  WriteBarrierMode mode = result->GetWriteBarrierMode();
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}
2364
2365
2366Object* Heap::AllocateFixedArray(int length) {
ager@chromium.org32912102009-01-16 10:38:43 +00002367 if (length == 0) return empty_fixed_array();
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002368 Object* result = AllocateRawFixedArray(length);
2369 if (!result->IsFailure()) {
2370 // Initialize header.
2371 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2372 FixedArray* array = FixedArray::cast(result);
2373 array->set_length(length);
2374 Object* value = undefined_value();
2375 // Initialize body.
2376 for (int index = 0; index < length; index++) {
2377 array->set(index, value, SKIP_WRITE_BARRIER);
2378 }
2379 }
2380 return result;
2381}
2382
2383
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002384Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
2385 ASSERT(empty_fixed_array()->IsFixedArray());
2386 if (length == 0) return empty_fixed_array();
2387
2388 int size = FixedArray::SizeFor(length);
2389 Object* result;
2390 if (size > MaxHeapObjectSize()) {
2391 result = lo_space_->AllocateRawFixedArray(size);
2392 } else {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002393 AllocationSpace space =
2394 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002395 result = AllocateRaw(size, space, OLD_POINTER_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002396 }
2397 if (result->IsFailure()) return result;
2398
2399 // Initialize the object.
2400 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2401 FixedArray* array = FixedArray::cast(result);
2402 array->set_length(length);
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002403 Object* value = undefined_value();
2404 for (int index = 0; index < length; index++) {
2405 array->set(index, value, SKIP_WRITE_BARRIER);
2406 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002407 return array;
2408}
2409
2410
2411Object* Heap::AllocateFixedArrayWithHoles(int length) {
2412 if (length == 0) return empty_fixed_array();
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002413 Object* result = AllocateRawFixedArray(length);
2414 if (!result->IsFailure()) {
2415 // Initialize header.
2416 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2417 FixedArray* array = FixedArray::cast(result);
2418 array->set_length(length);
2419 // Initialize body.
2420 Object* value = the_hole_value();
2421 for (int index = 0; index < length; index++) {
2422 array->set(index, value, SKIP_WRITE_BARRIER);
2423 }
2424 }
2425 return result;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002426}
2427
2428
2429Object* Heap::AllocateHashTable(int length) {
2430 Object* result = Heap::AllocateFixedArray(length);
2431 if (result->IsFailure()) return result;
2432 reinterpret_cast<Array*>(result)->set_map(hash_table_map());
2433 ASSERT(result->IsDictionary());
2434 return result;
2435}
2436
2437
2438Object* Heap::AllocateGlobalContext() {
2439 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
2440 if (result->IsFailure()) return result;
2441 Context* context = reinterpret_cast<Context*>(result);
2442 context->set_map(global_context_map());
2443 ASSERT(context->IsGlobalContext());
2444 ASSERT(result->IsContext());
2445 return result;
2446}
2447
2448
// Allocates a function context with |length| slots, closing over
// |function|. |length| must allow for at least the predefined minimum
// number of context slots. Returns a Failure object on failure.
Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
  Object* result = Heap::AllocateFixedArray(length);
  if (result->IsFailure()) return result;
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(context_map());
  context->set_closure(function);
  // A function context is its own innermost function context.
  context->set_fcontext(context);
  context->set_previous(NULL);
  context->set_extension(NULL);
  // The global object is taken from the closure's own context.
  context->set_global(function->context()->global());
  ASSERT(!context->IsGlobalContext());
  ASSERT(context->is_function_context());
  ASSERT(result->IsContext());
  return result;
}
2465
2466
// Allocates a 'with' context (or a 'catch' context when
// |is_catch_context| is set) chained onto |previous|, with |extension|
// holding the object introduced into the scope chain.
Object* Heap::AllocateWithContext(Context* previous,
                                  JSObject* extension,
                                  bool is_catch_context) {
  Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
  if (result->IsFailure()) return result;
  Context* context = reinterpret_cast<Context*>(result);
  // Catch contexts are distinguished from with contexts by map only.
  context->set_map(is_catch_context ? catch_context_map() : context_map());
  // Closure, function context and global are inherited from |previous|.
  context->set_closure(previous->closure());
  context->set_fcontext(previous->fcontext());
  context->set_previous(previous);
  context->set_extension(extension);
  context->set_global(previous->global());
  ASSERT(!context->IsGlobalContext());
  ASSERT(!context->is_function_context());
  ASSERT(result->IsContext());
  return result;
}
2484
2485
// Allocates a struct of the given instance type (one of STRUCT_LIST)
// in old pointer space, or large object space when oversized, and
// initializes its body. Returns a Failure object on failure or on an
// unknown type.
Object* Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
// Expands to one case per struct type, selecting the matching map root.
#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return Failure::InternalError();
  }
  int size = map->instance_size();
  AllocationSpace space =
      (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_POINTER_SPACE;
  Object* result = Heap::Allocate(map, space);
  if (result->IsFailure()) return result;
  Struct::cast(result)->InitializeBody(size);
  return result;
}
2504
2505
2506#ifdef DEBUG
2507
2508void Heap::Print() {
2509 if (!HasBeenSetup()) return;
2510 Top::PrintStack();
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002511 AllSpaces spaces;
2512 while (Space* space = spaces.next()) space->Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002513}
2514
2515
// Collects and prints code-object statistics for the spaces that are
// expected to contain code (debug only).
void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics();
  // We do not look for code in new space, map space, or old space. If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics();
}
2525
2526
2527// This function expects that NewSpace's allocated objects histogram is
2528// populated (via a call to CollectStatistics or else as a side effect of a
2529// just-completed scavenge collection).
// Prints GC counters, handle counts and per-space statistics for the
// whole heap (debug only).
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
         title, gc_count_);
  PrintF("mark-compact GC : %d\n", mc_count_);
  PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
  PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
  GlobalHandles::PrintStats();
  PrintF("\n");

  // Per-space allocation statistics.
  PrintF("Heap statistics : ");
  MemoryAllocator::ReportStatistics();
  PrintF("To space : ");
  new_space_.ReportStatistics();
  PrintF("Old pointer space : ");
  old_pointer_space_->ReportStatistics();
  PrintF("Old data space : ");
  old_data_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}
2559
2560#endif // DEBUG
2561
2562bool Heap::Contains(HeapObject* value) {
2563 return Contains(value->address());
2564}
2565
2566
2567bool Heap::Contains(Address addr) {
2568 if (OS::IsOutsideAllocatedSpace(addr)) return false;
2569 return HasBeenSetup() &&
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002570 (new_space_.ToSpaceContains(addr) ||
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002571 old_pointer_space_->Contains(addr) ||
2572 old_data_space_->Contains(addr) ||
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002573 code_space_->Contains(addr) ||
2574 map_space_->Contains(addr) ||
2575 lo_space_->SlowContains(addr));
2576}
2577
2578
2579bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
2580 return InSpace(value->address(), space);
2581}
2582
2583
2584bool Heap::InSpace(Address addr, AllocationSpace space) {
2585 if (OS::IsOutsideAllocatedSpace(addr)) return false;
2586 if (!HasBeenSetup()) return false;
2587
2588 switch (space) {
2589 case NEW_SPACE:
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002590 return new_space_.ToSpaceContains(addr);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002591 case OLD_POINTER_SPACE:
2592 return old_pointer_space_->Contains(addr);
2593 case OLD_DATA_SPACE:
2594 return old_data_space_->Contains(addr);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002595 case CODE_SPACE:
2596 return code_space_->Contains(addr);
2597 case MAP_SPACE:
2598 return map_space_->Contains(addr);
2599 case LO_SPACE:
2600 return lo_space_->SlowContains(addr);
2601 }
2602
2603 return false;
2604}
2605
2606
2607#ifdef DEBUG
// Verifies heap consistency (debug only): checks all root pointers via
// the pointer-verifying visitor, then asks every space to verify itself.
void Heap::Verify() {
  ASSERT(HasBeenSetup());

  VerifyPointersVisitor visitor;
  Heap::IterateRoots(&visitor);

  AllSpaces spaces;
  while (Space* space = spaces.next()) {
    space->Verify();
  }
}
2619#endif // DEBUG
2620
2621
2622Object* Heap::LookupSymbol(Vector<const char> string) {
2623 Object* symbol = NULL;
2624 Object* new_table =
2625 SymbolTable::cast(symbol_table_)->LookupSymbol(string, &symbol);
2626 if (new_table->IsFailure()) return new_table;
2627 symbol_table_ = new_table;
2628 ASSERT(symbol != NULL);
2629 return symbol;
2630}
2631
2632
2633Object* Heap::LookupSymbol(String* string) {
2634 if (string->IsSymbol()) return string;
2635 Object* symbol = NULL;
2636 Object* new_table =
2637 SymbolTable::cast(symbol_table_)->LookupString(string, &symbol);
2638 if (new_table->IsFailure()) return new_table;
2639 symbol_table_ = new_table;
2640 ASSERT(symbol != NULL);
2641 return symbol;
2642}
2643
2644
ager@chromium.org7c537e22008-10-16 08:43:32 +00002645bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
2646 if (string->IsSymbol()) {
2647 *symbol = string;
2648 return true;
2649 }
2650 SymbolTable* table = SymbolTable::cast(symbol_table_);
2651 return table->LookupSymbolIfExists(string, symbol);
2652}
2653
2654
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002655#ifdef DEBUG
2656void Heap::ZapFromSpace() {
2657 ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002658 for (Address a = new_space_.FromSpaceLow();
2659 a < new_space_.FromSpaceHigh();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002660 a += kPointerSize) {
2661 Memory::Address_at(a) = kFromSpaceZapValue;
2662 }
2663}
2664#endif // DEBUG
2665
2666
// Walks the remembered set (one bit per pointer-sized slot) covering
// [object_start, object_end). For every set bit whose slot currently
// holds a new-space pointer, |copy_object_func| is invoked on the slot;
// the bit is cleared if the slot no longer points into new space
// afterwards. Returns the number of set bits visited (for histograms).
int Heap::IterateRSetRange(Address object_start,
                           Address object_end,
                           Address rset_start,
                           ObjectSlotCallback copy_object_func) {
  Address object_address = object_start;
  Address rset_address = rset_start;
  int set_bits_count = 0;

  // Loop over all the pointers in [object_start, object_end).
  while (object_address < object_end) {
    uint32_t rset_word = Memory::uint32_at(rset_address);
    if (rset_word != 0) {
      // Accumulate the possibly-cleared bits to write back at the end.
      uint32_t result_rset = rset_word;
      // One remembered-set word covers kBitsPerInt consecutive slots.
      for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
        // Do not dereference pointers at or past object_end.
        if ((rset_word & bitmask) != 0 && object_address < object_end) {
          Object** object_p = reinterpret_cast<Object**>(object_address);
          if (Heap::InNewSpace(*object_p)) {
            copy_object_func(reinterpret_cast<HeapObject**>(object_p));
          }
          // If this pointer does not need to be remembered anymore, clear
          // the remembered set bit. The slot is re-read because the
          // callback may have updated it.
          if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
          set_bits_count++;
        }
        object_address += kPointerSize;
      }
      // Update the remembered set if it has changed.
      if (result_rset != rset_word) {
        Memory::uint32_at(rset_address) = result_rset;
      }
    } else {
      // No bits in the word were set. This is the common case.
      object_address += kPointerSize * kBitsPerInt;
    }
    rset_address += kIntSize;
  }
  return set_bits_count;
}
2706
2707
// Iterates the remembered sets of all in-use pages of |space|, invoking
// |copy_object_func| on every recorded new-space pointer. Records a
// per-page set-bit count histogram when the stats table is active.
void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
  ASSERT(Page::is_rset_in_use());
  // Only the pointer-holding paged spaces keep remembered sets.
  ASSERT(space == old_pointer_space_ || space == map_space_);

  // Created once on first call; NULL when the stats table is disabled.
  static void* paged_rset_histogram = StatsTable::CreateHistogram(
      "V8.RSetPaged",
      0,
      Page::kObjectAreaSize / kPointerSize,
      30);

  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* page = it.next();
    int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
                                 page->RSetStart(), copy_object_func);
    if (paged_rset_histogram != NULL) {
      StatsTable::AddHistogramSample(paged_rset_histogram, count);
    }
  }
}
2728
2729
2730#ifdef DEBUG
2731#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
2732#else
2733#define SYNCHRONIZE_TAG(tag)
2734#endif
2735
// Visits all roots: the strong roots plus the symbol table.
void Heap::IterateRoots(ObjectVisitor* v) {
  IterateStrongRoots(v);
  v->VisitPointer(reinterpret_cast<Object**>(&symbol_table_));
  SYNCHRONIZE_TAG("symbol_table");
}
2741
2742
// Visits all strong roots: the strong root list, struct maps, symbols,
// and roots held by the subsystems (bootstrapper, top, debugger,
// compilation cache, handle scopes, builtins, global handles, inactive
// threads). SYNCHRONIZE_TAG expands to v->Synchronize(tag) in debug
// builds and to nothing otherwise.
void Heap::IterateStrongRoots(ObjectVisitor* v) {
#define ROOT_ITERATE(type, name) \
  v->VisitPointer(bit_cast<Object**, type**>(&name##_));
  STRONG_ROOT_LIST(ROOT_ITERATE);
#undef ROOT_ITERATE
  SYNCHRONIZE_TAG("strong_root_list");

#define STRUCT_MAP_ITERATE(NAME, Name, name) \
  v->VisitPointer(bit_cast<Object**, Map**>(&name##_map_));
  STRUCT_LIST(STRUCT_MAP_ITERATE);
#undef STRUCT_MAP_ITERATE
  SYNCHRONIZE_TAG("struct_map");

#define SYMBOL_ITERATE(name, string) \
  v->VisitPointer(bit_cast<Object**, String**>(&name##_));
  SYMBOL_LIST(SYMBOL_ITERATE)
#undef SYMBOL_ITERATE
  // The hidden symbol is not part of SYMBOL_LIST; visit it explicitly.
  v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
  SYNCHRONIZE_TAG("symbol");

  Bootstrapper::Iterate(v);
  SYNCHRONIZE_TAG("bootstrapper");
  Top::Iterate(v);
  SYNCHRONIZE_TAG("top");

#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::Iterate(v);
#endif
  SYNCHRONIZE_TAG("debug");
  CompilationCache::Iterate(v);
  SYNCHRONIZE_TAG("compilationcache");

  // Iterate over local handles in handle scopes.
  HandleScopeImplementer::Iterate(v);
  SYNCHRONIZE_TAG("handlescope");

  // Iterate over the builtin code objects and code stubs in the heap. Note
  // that it is not strictly necessary to iterate over code objects on
  // scavenge collections. We still do it here because this same function
  // is used by the mark-sweep collector and the deserializer.
  Builtins::IterateBuiltins(v);
  SYNCHRONIZE_TAG("builtins");

  // Iterate over global handles.
  GlobalHandles::IterateRoots(v);
  SYNCHRONIZE_TAG("globalhandles");

  // Iterate over pointers being held by inactive threads.
  ThreadManager::Iterate(v);
  SYNCHRONIZE_TAG("threadmanager");
}
2794#undef SYNCHRONIZE_TAG
2795
2796
// Flag is set when the heap has been configured. The heap can be repeatedly
// configured through the API until it is setup (see Heap::ConfigureHeap and
// Heap::Setup below).
static bool heap_configured = false;
2800
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
// Records the requested sizes and derives the actual space sizes.
// Returns false if the heap has already been set up; zero or negative
// arguments leave the corresponding size unchanged.
bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
  if (HasBeenSetup()) return false;

  if (semispace_size > 0) semispace_size_ = semispace_size;
  if (old_gen_size > 0) old_generation_size_ = old_gen_size;

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  semispace_size_ = RoundUpToPowerOf2(semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
  // New space consists of two semispaces of equal size.
  young_generation_size_ = 2 * semispace_size_;

  // The old generation is paged.
  old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);

  heap_configured = true;
  return true;
}
2822
2823
// Configures the heap from the sizes given by the command-line flags.
bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
}
2827
2828
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002829int Heap::PromotedSpaceSize() {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002830 return old_pointer_space_->Size()
2831 + old_data_space_->Size()
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002832 + code_space_->Size()
2833 + map_space_->Size()
2834 + lo_space_->Size();
2835}
2836
2837
kasper.lund7276f142008-07-30 08:49:36 +00002838int Heap::PromotedExternalMemorySize() {
2839 if (amount_of_external_allocated_memory_
2840 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
2841 return amount_of_external_allocated_memory_
2842 - amount_of_external_allocated_memory_at_last_global_gc_;
2843}
2844
2845
bool Heap::Setup(bool create_heap_objects) {
  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (eg, through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!heap_configured) {
    if (!ConfigureHeapDefault()) return false;
  }

  // Setup memory allocator and allocate an initial chunk of memory. The
  // initial chunk is double the size of the new space to ensure that we can
  // find a pair of semispaces that are contiguous and aligned to their size.
  if (!MemoryAllocator::Setup(MaxCapacity())) return false;
  void* chunk
      = MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
  if (chunk == NULL) return false;

  // Layout of the initial chunk: the initial chunk of code space comes
  // first, then the pair of new space semispaces (aligned to their combined
  // size, which must be a power of 2), then the initial chunk of old space.
  ASSERT(IsPowerOf2(young_generation_size_));
  Address code_space_start = reinterpret_cast<Address>(chunk);
  Address new_space_start = RoundUp(code_space_start, young_generation_size_);
  Address old_space_start = new_space_start + young_generation_size_;
  // The alignment slack before new space is given to the code space.
  int code_space_size = new_space_start - code_space_start;
  int old_space_size = young_generation_size_ - code_space_size;

  // Initialize new space.
  if (!new_space_.Setup(new_space_start, young_generation_size_)) return false;

  // Initialize old space, set the maximum capacity to the old generation
  // size. It will not contain code. The old space chunk is split evenly
  // between the pointer space and the data space.
  old_pointer_space_ =
      new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->Setup(old_space_start, old_space_size >> 1)) {
    return false;
  }
  old_data_space_ =
      new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->Setup(old_space_start + (old_space_size >> 1),
                              old_space_size >> 1)) {
    return false;
  }

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  code_space_ =
      new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->Setup(code_space_start, code_space_size)) return false;

  // Initialize map space.
  map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
  if (map_space_ == NULL) return false;
  // Setting up a paged space without giving it a virtual memory range big
  // enough to hold at least a page will cause it to allocate.
  if (!map_space_->Setup(NULL, 0)) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->Setup()) return false;

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects
    if (!CreateInitialObjects()) return false;
  }

  LOG(IntEvent("heap-capacity", Capacity()));
  LOG(IntEvent("heap-available", Available()));

  return true;
}
2932
2933
// Release all resources acquired in Heap::Setup. Every pointer-held space
// is checked against NULL first, so this is safe to call after a partially
// failed Setup.
void Heap::TearDown() {
  GlobalHandles::TearDown();

  // new_space_ is held by value, so it is torn down but not deleted.
  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  // Tear down the allocator last, after all spaces have been released.
  MemoryAllocator::TearDown();
}
2971
2972
// Shrink the paged spaces (map, old pointer, old data, and code); the new
// space and the large object space are left alone.
void Heap::Shrink() {
  // Try to shrink map, old, and code spaces.
  map_space_->Shrink();
  old_pointer_space_->Shrink();
  old_data_space_->Shrink();
  code_space_->Shrink();
}
2980
2981
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00002982#ifdef ENABLE_HEAP_PROTECTION
2983
// Protect the memory of every heap space (the exact protection is up to
// each space's implementation). A no-op until the heap has been set up.
// Only compiled when ENABLE_HEAP_PROTECTION is defined.
void Heap::Protect() {
  if (HasBeenSetup()) {
    new_space_.Protect();
    map_space_->Protect();
    old_pointer_space_->Protect();
    old_data_space_->Protect();
    code_space_->Protect();
    lo_space_->Protect();
  }
}
2994
2995
2996void Heap::Unprotect() {
ager@chromium.org71daaf62009-04-01 07:22:49 +00002997 if (HasBeenSetup()) {
2998 new_space_.Unprotect();
2999 map_space_->Unprotect();
3000 old_pointer_space_->Unprotect();
3001 old_data_space_->Unprotect();
3002 code_space_->Unprotect();
3003 lo_space_->Unprotect();
3004 }
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00003005}
3006
3007#endif
3008
3009
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003010#ifdef DEBUG
3011
3012class PrintHandleVisitor: public ObjectVisitor {
3013 public:
3014 void VisitPointers(Object** start, Object** end) {
3015 for (Object** p = start; p < end; p++)
3016 PrintF(" handle %p to %p\n", p, *p);
3017 }
3018};
3019
// Debug helper: print every handle tracked by the HandleScopeImplementer.
void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  HandleScopeImplementer::Iterate(&v);
}
3025
3026#endif
3027
3028
ager@chromium.org9258b6b2008-09-11 09:11:10 +00003029Space* AllSpaces::next() {
3030 switch (counter_++) {
3031 case NEW_SPACE:
3032 return Heap::new_space();
3033 case OLD_POINTER_SPACE:
3034 return Heap::old_pointer_space();
3035 case OLD_DATA_SPACE:
3036 return Heap::old_data_space();
3037 case CODE_SPACE:
3038 return Heap::code_space();
3039 case MAP_SPACE:
3040 return Heap::map_space();
3041 case LO_SPACE:
3042 return Heap::lo_space();
3043 default:
3044 return NULL;
3045 }
3046}
3047
3048
3049PagedSpace* PagedSpaces::next() {
3050 switch (counter_++) {
3051 case OLD_POINTER_SPACE:
3052 return Heap::old_pointer_space();
3053 case OLD_DATA_SPACE:
3054 return Heap::old_data_space();
3055 case CODE_SPACE:
3056 return Heap::code_space();
3057 case MAP_SPACE:
3058 return Heap::map_space();
3059 default:
3060 return NULL;
3061 }
3062}
3063
3064
3065
3066OldSpace* OldSpaces::next() {
3067 switch (counter_++) {
3068 case OLD_POINTER_SPACE:
3069 return Heap::old_pointer_space();
3070 case OLD_DATA_SPACE:
3071 return Heap::old_data_space();
3072 case CODE_SPACE:
3073 return Heap::code_space();
3074 default:
3075 return NULL;
3076 }
3077}
3078
3079
// Position the iterator at the first space; no object iterator is
// allocated until next() is called.
SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
}
3082
3083
SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any. This makes the SpaceIterator the owner
  // of whatever iterator next() last returned.
  delete iterator_;
}
3088
3089
// True while there are spaces left after the current one. Based purely on
// the space index; the current space's object iterator may still be in use.
bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}
3094
3095
3096ObjectIterator* SpaceIterator::next() {
3097 if (iterator_ != NULL) {
3098 delete iterator_;
3099 iterator_ = NULL;
3100 // Move to the next space
3101 current_space_++;
3102 if (current_space_ > LAST_SPACE) {
3103 return NULL;
3104 }
3105 }
3106
3107 // Return iterator for the new current space.
3108 return CreateIterator();
3109}
3110
3111
// Create an iterator for the space to iterate. Each paged space gets a
// HeapObjectIterator; the new space and the large object space have their
// own iterator types. current_space_ must be a valid space, otherwise
// iterator_ stays NULL and the trailing ASSERT fires.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(Heap::new_space());
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_data_space());
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(Heap::code_space());
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(Heap::map_space());
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(Heap::lo_space());
      break;
  }

  // Return the newly allocated iterator;
  ASSERT(iterator_ != NULL);
  return iterator_;
}
3141
3142
// Construct a heap iterator positioned at the first space.
HeapIterator::HeapIterator() {
  Init();
}
3146
3147
HeapIterator::~HeapIterator() {
  // Shutdown releases the space iterator (which owns the object iterator).
  Shutdown();
}
3151
3152
void HeapIterator::Init() {
  // Start the iteration: allocate the space iterator and fetch the object
  // iterator for the first space.
  space_iterator_ = new SpaceIterator();
  object_iterator_ = space_iterator_->next();
}
3158
3159
void HeapIterator::Shutdown() {
  // Make sure the last iterator is deallocated. Deleting the space
  // iterator also deletes the object iterator it owns (see
  // SpaceIterator::~SpaceIterator), so object_iterator_ is only cleared
  // here, not deleted.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
}
3166
3167
3168bool HeapIterator::has_next() {
3169 // No iterator means we are done.
3170 if (object_iterator_ == NULL) return false;
3171
3172 if (object_iterator_->has_next_object()) {
3173 // If the current iterator has more objects we are fine.
3174 return true;
3175 } else {
3176 // Go though the spaces looking for one that has objects.
3177 while (space_iterator_->has_next()) {
3178 object_iterator_ = space_iterator_->next();
3179 if (object_iterator_->has_next_object()) {
3180 return true;
3181 }
3182 }
3183 }
3184 // Done with the last space.
3185 object_iterator_ = NULL;
3186 return false;
3187}
3188
3189
3190HeapObject* HeapIterator::next() {
3191 if (has_next()) {
3192 return object_iterator_->next_object();
3193 } else {
3194 return NULL;
3195 }
3196}
3197
3198
void HeapIterator::reset() {
  // Restart the iterator from the first space.
  Shutdown();
  Init();
}
3204
3205
3206//
3207// HeapProfiler class implementation.
3208//
3209#ifdef ENABLE_LOGGING_AND_PROFILING
3210void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
3211 InstanceType type = obj->map()->instance_type();
3212 ASSERT(0 <= type && type <= LAST_TYPE);
3213 info[type].increment_number(1);
3214 info[type].increment_bytes(obj->Size());
3215}
3216#endif
3217
3218
3219#ifdef ENABLE_LOGGING_AND_PROFILING
// Write one allocation-profile sample to the log: a begin event, one item
// per instance type with live objects (all string types lumped into a
// single STRING_TYPE entry), and an end event.
void HeapProfiler::WriteSample() {
  LOG(HeapSampleBeginEvent("Heap", "allocated"));

  // One histogram slot per instance type, named after the type.
  HistogramInfo info[LAST_TYPE+1];
#define DEF_TYPE_NAME(name) info[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

  // Walk the whole heap, accumulating per-type counts and byte sizes.
  HeapIterator iterator;
  while (iterator.has_next()) {
    CollectStats(iterator.next(), info);
  }

  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT_SIZE(type, size, name)   \
    string_number += info[type].number();  \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT_SIZE)
#undef INCREMENT_SIZE
  if (string_bytes > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Emit the non-string types that have any live bytes.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].bytes() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }

  LOG(HeapSampleEndEvent("Heap", "allocated"));
}
3254
3255
3256#endif
3257
3258
3259
3260#ifdef DEBUG
3261
// State shared by the TracePathTo* debug helpers below.
static bool search_for_any_global;      // true: stop at any global object
static Object* search_target;           // specific object searched for
static bool found_target;               // set once the target is reached
static List<Object*> object_stack(20);  // path from a root to the target


// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;
3270
static void MarkObjectRecursively(Object** p);
// Visitor that recursively marks every heap object reachable through the
// pointer slots it visits.
class MarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Recursively mark all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkObjectRecursively(p);
    }
  }
};
3282
static MarkObjectVisitor mark_visitor;

// Depth-first marking used by MarkRootObjectRecursively. An object is
// marked as visited by adding kMarkTag to its map pointer, which also makes
// the map word fail the IsHeapObject() test below. The traversal stops as
// soon as the target (or any global object, depending on
// search_for_any_global) is found; object_stack then holds the path from
// the root to the target.
static void MarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target) return;  // stop if target found
  object_stack.Add(obj);
  if ((search_for_any_global && obj->IsJSGlobalObject()) ||
      (!search_for_any_global && (obj == search_target))) {
    found_target = true;
    return;
  }

  // Convert IC targets in code objects from addresses to objects so the
  // body iteration below can follow them.
  if (obj->IsCode()) {
    Code::cast(obj)->ConvertICTargetsFromAddressToObject();
  }

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  // Mark this object by storing the tagged map address in its map word.
  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Mark the map itself, then the object's body.
  MarkObjectRecursively(&map);

  obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                   &mark_visitor);

  if (!found_target)  // don't pop if found the target
    object_stack.RemoveLast();
}
3321
3322
static void UnmarkObjectRecursively(Object** p);
// Visitor that recursively unmarks every heap object reachable through the
// pointer slots it visits, undoing MarkObjectVisitor's work.
class UnmarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Recursively unmark all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        UnmarkObjectRecursively(p);
    }
  }
};

static UnmarkObjectVisitor unmark_visitor;
3336
// Undo the marking performed by MarkObjectRecursively: restore the real map
// pointer (by subtracting kMarkTag), recurse into the map and the object's
// body, and convert IC targets in code objects back to addresses.
static void UnmarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  // The mark is the map address plus kMarkTag; strip the tag to recover
  // the real map address.
  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  // Unmark the map itself, then the object's body.
  UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   &unmark_visitor);

  // Marking converted IC targets to objects; convert them back.
  if (obj->IsCode()) {
    Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
  }
}
3366
3367
// Mark from |root| looking for the search target (or any global object),
// unmark again, and print the recorded path if the target was found.
static void MarkRootObjectRecursively(Object** root) {
  if (search_for_any_global) {
    ASSERT(search_target == NULL);
  } else {
    // NOTE: this dereferences search_target; it must be a real heap object
    // when searching for a specific target.
    ASSERT(search_target->IsHeapObject());
  }
  found_target = false;
  object_stack.Clear();

  // Marking records the path in object_stack; unmarking restores the heap.
  MarkObjectRecursively(root);
  UnmarkObjectRecursively(root);

  if (found_target) {
    PrintF("=====================================\n");
    PrintF("==== Path to object ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack.is_empty());
    for (int i = 0; i < object_stack.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}
3394
3395
3396// Helper class for visiting HeapObjects recursively.
3397class MarkRootVisitor: public ObjectVisitor {
3398 public:
3399 void VisitPointers(Object** start, Object** end) {
3400 // Visit all HeapObject pointers in [start, end)
3401 for (Object** p = start; p < end; p++) {
3402 if ((*p)->IsHeapObject())
3403 MarkRootObjectRecursively(p);
3404 }
3405 }
3406};
3407
3408
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
// NOTE(review): search_target is reset to NULL here while
// search_for_any_global is false, so MarkRootObjectRecursively's
// ASSERT(search_target->IsHeapObject()) would trip as-is — presumably the
// target is patched in from a debugger before the traversal runs; confirm
// before relying on this entry point.
void Heap::TracePathToObject() {
  search_target = NULL;
  search_for_any_global = false;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor);
}
3418
3419
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  // No specific target: stop at the first global object encountered.
  search_target = NULL;
  search_for_any_global = true;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor);
}
3430#endif
3431
3432
// Constructed at the start of a collection: snapshots the previous full
// collection's state and, when --trace-gc is set, the start time and heap
// size for the report printed by the destructor.
GCTracer::GCTracer()
    : start_time_(0.0),
      start_size_(0.0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
  // Timing/size bookkeeping is only needed when tracing is enabled.
  if (!FLAG_trace_gc) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = SizeOfHeapObjects();
}
3448
3449
// Emits the --trace-gc report for the collection that just finished.
GCTracer::~GCTracer() {
  if (!FLAG_trace_gc) return;
  // Printf ONE line iff flag is set.
  PrintF("%s %.1f -> %.1f MB, %d ms.\n",
         CollectorString(),
         start_size_, SizeOfHeapObjects(),
         static_cast<int>(OS::TimeCurrentMillis() - start_time_));
}
3458
3459
3460const char* GCTracer::CollectorString() {
3461 switch (collector_) {
3462 case SCAVENGER:
3463 return "Scavenge";
3464 case MARK_COMPACTOR:
3465 return MarkCompactCollector::HasCompacted() ? "Mark-compact"
3466 : "Mark-sweep";
3467 }
3468 return "Unknown GC";
3469}
3470
3471
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00003472#ifdef DEBUG
// Debug-only check used when --gc-greedy is set (asserted below). Skips
// collection (returning true) while bootstrapping or while allocation
// failure is disallowed; otherwise forces a new-space collection and
// returns its result. Presumably invoked around allocations to shake out
// GC-unsafe code — confirm at call sites.
bool Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (Bootstrapper::IsActive()) return true;
  if (disallow_allocation_failure()) return true;
  return CollectGarbage(0, NEW_SPACE);
}
3479#endif
3480
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003481} } // namespace v8::internal