blob: f3842976ff12026df2c2f3e4879fc3d22b29f051 [file] [log] [blame]
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001// Copyright 2006-2008 the V8 project authors. All rights reserved.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "accessors.h"
31#include "api.h"
32#include "bootstrapper.h"
33#include "codegen-inl.h"
kasperl@chromium.orgb9123622008-09-17 14:05:56 +000034#include "compilation-cache.h"
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000035#include "debug.h"
36#include "global-handles.h"
37#include "jsregexp.h"
38#include "mark-compact.h"
39#include "natives.h"
40#include "scanner.h"
41#include "scopeinfo.h"
42#include "v8threads.h"
43
44namespace v8 { namespace internal {
45
// Storage definitions for Heap's static members.  The X-macro lists
// (ROOT_LIST, STRUCT_LIST, SYMBOL_LIST) expand to one static pointer
// per root object, struct map, and pre-interned symbol.
#define ROOT_ALLOCATION(type, name) type* Heap::name##_;
  ROOT_LIST(ROOT_ALLOCATION)
#undef ROOT_ALLOCATION


#define STRUCT_ALLOCATION(NAME, Name, name) Map* Heap::name##_map_;
  STRUCT_LIST(STRUCT_ALLOCATION)
#undef STRUCT_ALLOCATION


#define SYMBOL_ALLOCATION(name, string) String* Heap::name##_;
  SYMBOL_LIST(SYMBOL_ALLOCATION)
#undef SYMBOL_ALLOCATION

String* Heap::hidden_symbol_;

// The new space is held by value; the other spaces are allocated lazily
// during Heap setup (see HasBeenSetup, which checks these pointers).
NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

// Lower bounds for the old-generation limits recomputed after each
// mark-compact collection (see PerformGarbageCollection).
static const int kMinimumPromotionLimit = 2*MB;
static const int kMinimumAllocationLimit = 8*MB;

int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

// NOTE(review): declared int but only ever assigned/tested as a boolean
// flag; the type presumably matches the header declaration -- confirm.
int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
int Heap::semispace_size_ = 2*MB;
int Heap::old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 256*KB;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.

// Double the new space after this many scavenge collections.
int Heap::new_space_growth_limit_ = 8;
int Heap::scavenge_count_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

// mc_count_ counts full (mark-compact) collections; gc_count_ counts all.
int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;

int Heap::always_allocate_scope_depth_ = 0;
bool Heap::context_disposed_pending_ = false;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

// If non-zero, countdown (in allocations) until a forced GC, driven by
// FLAG_gc_interval (see CollectGarbage).
int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG
110
111
112int Heap::Capacity() {
113 if (!HasBeenSetup()) return 0;
114
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000115 return new_space_.Capacity() +
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000116 old_pointer_space_->Capacity() +
117 old_data_space_->Capacity() +
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000118 code_space_->Capacity() +
119 map_space_->Capacity();
120}
121
122
123int Heap::Available() {
124 if (!HasBeenSetup()) return 0;
125
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000126 return new_space_.Available() +
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000127 old_pointer_space_->Available() +
128 old_data_space_->Available() +
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000129 code_space_->Available() +
130 map_space_->Available();
131}
132
133
134bool Heap::HasBeenSetup() {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000135 return old_pointer_space_ != NULL &&
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000136 old_data_space_ != NULL &&
137 code_space_ != NULL &&
138 map_space_ != NULL &&
139 lo_space_ != NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000140}
141
142
// Chooses which collector to run for an allocation failure in 'space':
// the scavenger (young generation only) or the mark-compact collector
// (full heap).  Each early-return also records the reason in a counter.
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}
179
180
181// TODO(1238405): Combine the infrastructure for --heap-stats and
182// --log-gc to avoid the complicated preprocessor and flag testing.
183#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  // Both features compiled in: collect stats if either flag wants them,
  // but report via only one of the two channels.
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  // DEBUG only: --heap-stats drives everything.
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  // Logging only: --log-gc drives everything.
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}
210
211
212// TODO(1238405): Combine the infrastructure for --heap-stats and
213// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
229#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
230
231
// Runs once at the start of every collection (both collectors): bumps the
// GC count and, in DEBUG builds, disallows allocation and performs the
// requested verification/printing.
void Heap::GarbageCollectionPrologue() {
  RegExpImpl::NewSpaceCollectionPrologue();
  gc_count_++;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  // Allocation is forbidden for the duration of the GC; re-enabled in
  // GarbageCollectionEpilogue.
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();

  if (FLAG_print_rset) {
    // Not all spaces have remembered set bits that we care about.
    old_pointer_space_->PrintRSet();
    map_space_->PrintRSet();
    lo_space_->PrintRSet();
  }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}
257
258int Heap::SizeOfObjects() {
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000259 int total = 0;
260 AllSpaces spaces;
261 while (Space* space = spaces.next()) total += space->Size();
262 return total;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000263}
264
// Runs once at the end of every collection: re-enables allocation (DEBUG),
// performs optional verification/printing, and updates live-size and
// symbol-table counters.
void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  // Fill from-space with garbage values so stale pointers fault fast.
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(SizeOfObjects());

  SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table_);
  Counters::symbol_table_capacity.Set(symbol_table->Capacity());
  Counters::number_of_symbols.Set(symbol_table->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
}
289
290
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000291void Heap::CollectAllGarbage() {
292 // Since we are ignoring the return value, the exact choice of space does
293 // not matter, so long as we do not specify NEW_SPACE, which would not
294 // cause a full GC.
295 CollectGarbage(0, OLD_POINTER_SPACE);
296}
297
298
kasperl@chromium.org061ef742009-02-27 12:16:20 +0000299void Heap::CollectAllGarbageIfContextDisposed() {
kasperl@chromium.orgd55d36b2009-03-05 08:03:28 +0000300 // If the garbage collector interface is exposed through the global
301 // gc() function, we avoid being clever about forcing GCs when
302 // contexts are disposed and leave it to the embedder to make
303 // informed decisions about when to force a collection.
304 if (!FLAG_expose_gc && context_disposed_pending_) {
kasperl@chromium.org061ef742009-02-27 12:16:20 +0000305 StatsRateScope scope(&Counters::gc_context);
306 CollectAllGarbage();
kasperl@chromium.org061ef742009-02-27 12:16:20 +0000307 }
kasperl@chromium.orgd55d36b2009-03-05 08:03:28 +0000308 context_disposed_pending_ = false;
kasperl@chromium.org061ef742009-02-27 12:16:20 +0000309}
310
311
312void Heap::NotifyContextDisposed() {
313 context_disposed_pending_ = true;
314}
315
316
// Performs one garbage collection in response to an allocation failure of
// 'requested_size' bytes in 'space'.  Returns whether that many bytes are
// available in the failing space afterwards.
bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    GarbageCollector collector = SelectGarbageCollector(space);
    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    // Time the collection under the counter matching the chosen collector.
    StatsRate* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    PerformGarbageCollection(space, collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  // Report whether the requested amount is now free in the failing space.
  switch (space) {
    case NEW_SPACE:
      return new_space_.Available() >= requested_size;
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Available() >= requested_size;
    case OLD_DATA_SPACE:
      return old_data_space_->Available() >= requested_size;
    case CODE_SPACE:
      return code_space_->Available() >= requested_size;
    case MAP_SPACE:
      return map_space_->Available() >= requested_size;
    case LO_SPACE:
      return lo_space_->Available() >= requested_size;
  }
  return false;
}
371
372
kasper.lund7276f142008-07-30 08:49:36 +0000373void Heap::PerformScavenge() {
374 GCTracer tracer;
375 PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
376}
377
378
// Dispatches to the selected collector, recomputes old-generation limits
// after a full GC, and invokes the embedder's global GC callbacks around
// mark-compact collections.
void Heap::PerformGarbageCollection(AllocationSpace space,
                                    GarbageCollector collector,
                                    GCTracer* tracer) {
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    global_gc_prologue_callback_();
  }

  if (collector == MARK_COMPACTOR) {
    MarkCompact(tracer);

    // Recompute the promotion/allocation limits from the post-GC size of
    // the old generation: size plus a third (bounded below by the minima).
    int old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3);
    old_gen_exhausted_ = false;

    // If we have used the mark-compact collector to collect the new
    // space, and it has not compacted the new space, we force a
    // separate scavenge collection.  This is a hack.  It covers the
    // case where (1) a new space collection was requested, (2) the
    // collector selection policy selected the mark-compact collector,
    // and (3) the mark-compact collector policy selected not to
    // compact the new space.  In that case, there is no more (usable)
    // free space in the new space after the collection compared to
    // before.
    if (space == NEW_SPACE && !MarkCompactCollector::HasCompacted()) {
      Scavenge();
    }
  } else {
    Scavenge();
  }
  Counters::objs_since_last_young.Set(0);

  PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    global_gc_epilogue_callback_();
  }
}
427
428
ager@chromium.orga74f0da2008-12-03 16:05:52 +0000429void Heap::PostGarbageCollectionProcessing() {
430 // Process weak handles post gc.
431 GlobalHandles::PostGarbageCollectionProcessing();
432 // Update flat string readers.
433 FlatStringReader::PostGarbageCollectionProcessing();
434}
435
436
// Runs one full mark-compact collection: prepare, prologue, collect,
// epilogue, then shrink the spaces and reset full-GC bookkeeping.
void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  mc_count_++;
  tracer->set_full_gc_count(mc_count_);
  LOG(ResourceEvent("markcompact", "begin"));

  // Prepare decides (among other things) whether this GC will compact;
  // the prologue/epilogue hooks need to know that decision.
  MarkCompactCollector::Prepare(tracer);

  bool is_compacting = MarkCompactCollector::IsCompacting();

  MarkCompactPrologue(is_compacting);

  MarkCompactCollector::CollectGarbage();

  MarkCompactEpilogue(is_compacting);

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);
  // A full GC satisfies any pending context-disposal collection.
  context_disposed_pending_ = false;
}
462
463
// Notifies caches and VM subsystems that a full GC is about to run so they
// can drop references that would otherwise keep dead objects alive.
void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  ClearKeyedLookupCache();

  CompilationCache::MarkCompactPrologue();
  RegExpImpl::OldSpaceCollectionPrologue();

  Top::MarkCompactPrologue(is_compacting);
  ThreadManager::MarkCompactPrologue(is_compacting);
}
475
476
// Counterpart of MarkCompactPrologue: lets the VM subsystems fix up state
// after a full GC (e.g. relocated pointers if the GC compacted).
void Heap::MarkCompactEpilogue(bool is_compacting) {
  Top::MarkCompactEpilogue(is_compacting);
  ThreadManager::MarkCompactEpilogue(is_compacting);
}
481
482
483Object* Heap::FindCodeObject(Address a) {
484 Object* obj = code_space_->FindObject(a);
485 if (obj->IsFailure()) {
486 obj = lo_space_->FindObject(a);
487 }
kasper.lund7276f142008-07-30 08:49:36 +0000488 ASSERT(!obj->IsFailure());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000489 return obj;
490}
491
492
493// Helper class for copying HeapObjects
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000494class ScavengeVisitor: public ObjectVisitor {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000495 public:
496
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000497 void VisitPointer(Object** p) { ScavengePointer(p); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000498
499 void VisitPointers(Object** start, Object** end) {
500 // Copy all HeapObject pointers in [start, end)
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000501 for (Object** p = start; p < end; p++) ScavengePointer(p);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000502 }
503
504 private:
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000505 void ScavengePointer(Object** p) {
506 Object* object = *p;
507 if (!Heap::InNewSpace(object)) return;
508 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
509 reinterpret_cast<HeapObject*>(object));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000510 }
511};
512
513
// Shared state read by the scavenge collector and set by ScavengeObject.
// Addresses of objects promoted to old space are written downward from the
// high end of to-space; this is the current (lowest) such address.
static Address promoted_top = NULL;
516
517
518#ifdef DEBUG
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000519// Visitor class to verify pointers in code or data space do not point into
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000520// new space.
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000521class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000522 public:
523 void VisitPointers(Object** start, Object**end) {
524 for (Object** current = start; current < end; current++) {
525 if ((*current)->IsHeapObject()) {
526 ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
527 }
528 }
529 }
530};
531#endif
532
// Young-generation collection using Cheney's copying algorithm.  Live
// objects are either copied within the new space or promoted to old
// space; both kinds are then swept transitively until a fixed point.
void Heap::Scavenge() {
#ifdef DEBUG
  // Optionally verify that code-space objects hold no new-space pointers.
  // IC targets are temporarily converted to object form so the visitor
  // sees them as ordinary pointers.
  if (FLAG_enable_slow_asserts) {
    VerifyNonPointerSpacePointersVisitor v;
    HeapObjectIterator it(code_space_);
    while (it.has_next()) {
      HeapObject* object = it.next();
      if (object->IsCode()) {
        Code::cast(object)->ConvertICTargetsFromAddressToObject();
      }
      object->Iterate(&v);
      if (object->IsCode()) {
        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
      }
    }
  }
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  scavenge_count_++;
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      scavenge_count_ > new_space_growth_limit_) {
    // Double the size of the new space, and double the limit.  The next
    // doubling attempt will occur after the current new_space_growth_limit_
    // more collections.
    // TODO(1240712): NewSpace::Double has a return value which is
    // ignored here.
    new_space_.Double();
    new_space_growth_limit_ *= 2;
  }

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be in either the to space
  // or the old space.  For to space objects, we use a mark.  Newly copied
  // objects lie between the mark and the allocation top.  For objects
  // promoted to old space, we write their addresses downward from the top of
  // the new space.  Sweeping newly promoted objects requires an allocation
  // pointer and a mark.  Note that the allocation pointer 'top' actually
  // moves downward from the high address in the to space.
  //
  // There is guaranteed to be enough room at the top of the to space for the
  // addresses of promoted objects: every object promoted frees up its size in
  // bytes from the top of the new space, and objects are at least one pointer
  // in size.  Using the new space to record promoted addresses makes the
  // scavenge collector agnostic to the allocation strategy (eg, linear or
  // free-list) used in old space.
  Address new_mark = new_space_.ToSpaceLow();
  Address promoted_mark = new_space_.ToSpaceHigh();
  promoted_top = new_space_.ToSpaceHigh();

  ScavengeVisitor scavenge_visitor;
  // Copy roots.
  IterateRoots(&scavenge_visitor);

  // Copy objects reachable from the old generation.  By definition, there
  // are no intergenerational pointers in code or data spaces.
  IterateRSet(old_pointer_space_, &ScavengePointer);
  IterateRSet(map_space_, &ScavengePointer);
  lo_space_->IterateRSet(&ScavengePointer);

  bool has_processed_weak_pointers = false;

  while (true) {
    ASSERT(new_mark <= new_space_.top());
    ASSERT(promoted_mark >= promoted_top);

    // Copy objects reachable from newly copied objects.
    while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
      // Sweep newly copied objects in the to space.  The allocation pointer
      // can change during sweeping.
      Address previous_top = new_space_.top();
      SemiSpaceIterator new_it(new_space(), new_mark);
      while (new_it.has_next()) {
        new_it.next()->Iterate(&scavenge_visitor);
      }
      new_mark = previous_top;

      // Sweep newly copied objects in the old space.  The promotion 'top'
      // pointer could change during sweeping.
      previous_top = promoted_top;
      for (Address current = promoted_mark - kPointerSize;
           current >= previous_top;
           current -= kPointerSize) {
        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
        object->Iterate(&scavenge_visitor);
        // Promoted objects may point back into new space, so their
        // remembered-set bits must be (re)built.
        UpdateRSet(object);
      }
      promoted_mark = previous_top;
    }

    if (has_processed_weak_pointers) break;  // We are done.
    // Copy objects reachable from weak pointers.
    GlobalHandles::IterateWeakRoots(&scavenge_visitor);
    has_processed_weak_pointers = true;
  }

  // Set age mark.
  new_space_.set_age_mark(new_mark);

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}
644
645
// Clears the remembered-set bits covering the address range
// [start, start + size_in_bytes).  The rset is a bitmap addressed by
// (word address, bit index) pairs computed by Page::ComputeRSetBitPosition.
void Heap::ClearRSetRange(Address start, int size_in_bytes) {
  uint32_t start_bit;
  Address start_word_address =
      Page::ComputeRSetBitPosition(start, 0, &start_bit);
  uint32_t end_bit;
  // Position of the last rset bit in the range (hence the -kIntSize).
  Address end_word_address =
      Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
                                   0,
                                   &end_bit);

  // We want to clear the bits in the starting word starting with the
  // first bit, and in the ending word up to and including the last
  // bit.  Build a pair of bitmasks to do that.
  uint32_t start_bitmask = start_bit - 1;
  uint32_t end_bitmask = ~((end_bit << 1) - 1);

  // If the start address and end address are the same, we mask that
  // word once, otherwise mask the starting and ending word
  // separately and all the ones in between.
  if (start_word_address == end_word_address) {
    Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
  } else {
    Memory::uint32_at(start_word_address) &= start_bitmask;
    Memory::uint32_at(end_word_address) &= end_bitmask;
    start_word_address += kIntSize;
    memset(start_word_address, 0, end_word_address - start_word_address);
  }
}
674
675
676class UpdateRSetVisitor: public ObjectVisitor {
677 public:
678
679 void VisitPointer(Object** p) {
680 UpdateRSet(p);
681 }
682
683 void VisitPointers(Object** start, Object** end) {
684 // Update a store into slots [start, end), used (a) to update remembered
685 // set when promoting a young object to old space or (b) to rebuild
686 // remembered sets after a mark-compact collection.
687 for (Object** p = start; p < end; p++) UpdateRSet(p);
688 }
689 private:
690
691 void UpdateRSet(Object** p) {
692 // The remembered set should not be set. It should be clear for objects
693 // newly copied to old space, and it is cleared before rebuilding in the
694 // mark-compact collector.
695 ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
696 if (Heap::InNewSpace(*p)) {
697 Page::SetRSet(reinterpret_cast<Address>(p), 0);
698 }
699 }
700};
701
702
// Rebuilds the remembered-set bits for an old-space object and returns the
// object's size in bytes.
int Heap::UpdateRSet(HeapObject* obj) {
  ASSERT(!InNewSpace(obj));
  // Special handling of fixed arrays to iterate the body based on the start
  // address and offset.  Just iterating the pointers as in UpdateRSetVisitor
  // will not work because Page::SetRSet needs to have the start of the
  // object.
  if (obj->IsFixedArray()) {
    FixedArray* array = FixedArray::cast(obj);
    int length = array->length();
    for (int i = 0; i < length; i++) {
      int offset = FixedArray::kHeaderSize + i * kPointerSize;
      ASSERT(!Page::IsRSetSet(obj->address(), offset));
      if (Heap::InNewSpace(array->get(i))) {
        Page::SetRSet(obj->address(), offset);
      }
    }
  } else if (!obj->IsCode()) {
    // Skip code object, we know it does not contain inter-generational
    // pointers.
    UpdateRSetVisitor v;
    obj->Iterate(&v);
  }
  return obj->Size();
}
727
728
729void Heap::RebuildRSets() {
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000730 // By definition, we do not care about remembered set bits in code or data
731 // spaces.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000732 map_space_->ClearRSet();
733 RebuildRSets(map_space_);
734
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000735 old_pointer_space_->ClearRSet();
736 RebuildRSets(old_pointer_space_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000737
738 Heap::lo_space_->ClearRSet();
739 RebuildRSets(lo_space_);
740}
741
742
743void Heap::RebuildRSets(PagedSpace* space) {
744 HeapObjectIterator it(space);
745 while (it.has_next()) Heap::UpdateRSet(it.next());
746}
747
748
749void Heap::RebuildRSets(LargeObjectSpace* space) {
750 LargeObjectIterator it(space);
751 while (it.has_next()) Heap::UpdateRSet(it.next());
752}
753
754
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Report a scavenged object to the new-space statistics, guarded by
// the relevant runtime flags.  Compiled in only for debug or
// logging/profiling builds.
void Heap::RecordCopiedObject(HeapObject* obj) {
  bool record = false;
#ifdef DEBUG
  record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  record = record || FLAG_log_gc;
#endif
  if (!record) return;
  // Objects still inside new space were copied within the semispaces;
  // anything else was promoted out of the young generation.
  if (new_space_.Contains(obj)) {
    new_space_.RecordAllocation(obj);
  } else {
    new_space_.RecordPromotion(obj);
  }
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
773
774
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000775
776HeapObject* Heap::MigrateObject(HeapObject* source,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000777 HeapObject* target,
778 int size) {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000779 // Copy the content of source to target.
780 CopyBlock(reinterpret_cast<Object**>(target->address()),
781 reinterpret_cast<Object**>(source->address()),
782 size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000783
kasper.lund7276f142008-07-30 08:49:36 +0000784 // Set the forwarding address.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000785 source->set_map_word(MapWord::FromForwardingAddress(target));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000786
787 // Update NewSpace stats if necessary.
788#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
789 RecordCopiedObject(target);
790#endif
791
792 return target;
793}
794
795
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000796// Inlined function.
797void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
798 ASSERT(InFromSpace(object));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000799
kasper.lund7276f142008-07-30 08:49:36 +0000800 // We use the first word (where the map pointer usually is) of a heap
801 // object to record the forwarding pointer. A forwarding pointer can
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000802 // point to an old space, the code space, or the to space of the new
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000803 // generation.
kasper.lund7276f142008-07-30 08:49:36 +0000804 MapWord first_word = object->map_word();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000805
kasper.lund7276f142008-07-30 08:49:36 +0000806 // If the first word is a forwarding address, the object has already been
807 // copied.
808 if (first_word.IsForwardingAddress()) {
809 *p = first_word.ToForwardingAddress();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000810 return;
811 }
812
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000813 // Call the slow part of scavenge object.
814 return ScavengeObjectSlow(p, object);
815}
816
ager@chromium.org870a0b62008-11-04 11:43:05 +0000817
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000818static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
ager@chromium.org870a0b62008-11-04 11:43:05 +0000819 // A ConsString object with Heap::empty_string() as the right side
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000820 // is a candidate for being shortcut by the scavenger.
821 ASSERT(object->map() == map);
ager@chromium.org870a0b62008-11-04 11:43:05 +0000822 if (map->instance_type() >= FIRST_NONSTRING_TYPE) return false;
ager@chromium.orgc3e50d82008-11-05 11:53:10 +0000823 return (StringShape(map).representation_tag() == kConsStringTag) &&
ager@chromium.org870a0b62008-11-04 11:43:05 +0000824 (ConsString::cast(object)->unchecked_second() == Heap::empty_string());
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000825}
826
827
// Out-of-line part of the scavenger: copy 'object' (referenced by the
// slot *p) out of from-space — either promoting it to an old space or
// copying it into to-space — and update the slot.  Only called for
// objects that have not been forwarded yet.
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  ASSERT(InFromSpace(object));
  MapWord first_word = object->map_word();
  ASSERT(!first_word.IsForwardingAddress());

  // Optimization: Bypass flattened ConsString objects.
  if (IsShortcutCandidate(object, first_word.ToMap())) {
    // Point the slot at the left child instead and scavenge that; the
    // right child is known to be the empty string.
    object = HeapObject::cast(ConsString::cast(object)->unchecked_first());
    *p = object;
    // After patching *p we have to repeat the checks that object is in the
    // active semispace of the young generation and not already copied.
    if (!InNewSpace(object)) return;
    first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      *p = first_word.ToForwardingAddress();
      return;
    }
  }

  int object_size = object->SizeFromMap(first_word.ToMap());
  // If the object should be promoted, we try to copy it to old space.
  if (ShouldBePromoted(object->address(), object_size)) {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space_ ||
           target_space == Heap::old_data_space_);
    Object* result = target_space->AllocateRaw(object_size);
    // On allocation failure, fall through to new-space allocation below.
    if (!result->IsFailure()) {
      *p = MigrateObject(object, HeapObject::cast(result), object_size);
      if (target_space == Heap::old_pointer_space_) {
        // Record the object's address at the top of the to space, to allow
        // it to be swept by the scavenger.
        promoted_top -= kPointerSize;
        Memory::Object_at(promoted_top) = *p;
      } else {
#ifdef DEBUG
        // Objects promoted to the data space should not have pointers to
        // new space.
        VerifyNonPointerSpacePointersVisitor v;
        (*p)->Iterate(&v);
#endif
      }
      return;
    }
  }

  // The object should remain in new space or the old space allocation failed.
  Object* result = new_space_.AllocateRaw(object_size);
  // Failed allocation at this point is utterly unexpected.
  ASSERT(!result->IsFailure());
  *p = MigrateObject(object, HeapObject::cast(result), object_size);
}
879
880
881void Heap::ScavengePointer(HeapObject** p) {
882 ScavengeObject(p, *p);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000883}
884
885
886Object* Heap::AllocatePartialMap(InstanceType instance_type,
887 int instance_size) {
888 Object* result = AllocateRawMap(Map::kSize);
889 if (result->IsFailure()) return result;
890
891 // Map::cast cannot be used due to uninitialized map field.
892 reinterpret_cast<Map*>(result)->set_map(meta_map());
893 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
894 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
ager@chromium.org7c537e22008-10-16 08:43:32 +0000895 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000896 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
897 return result;
898}
899
900
901Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
902 Object* result = AllocateRawMap(Map::kSize);
903 if (result->IsFailure()) return result;
904
905 Map* map = reinterpret_cast<Map*>(result);
906 map->set_map(meta_map());
907 map->set_instance_type(instance_type);
908 map->set_prototype(null_value());
909 map->set_constructor(null_value());
910 map->set_instance_size(instance_size);
ager@chromium.org7c537e22008-10-16 08:43:32 +0000911 map->set_inobject_properties(0);
mads.s.ager@gmail.com9a4089a2008-09-01 08:55:01 +0000912 map->set_instance_descriptors(empty_descriptor_array());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000913 map->set_code_cache(empty_fixed_array());
914 map->set_unused_property_fields(0);
915 map->set_bit_field(0);
916 return map;
917}
918
919
// Bootstrap the core map objects.  The order is delicate: the meta
// map, fixed array map and oddball map are created first as partial
// maps (the objects their remaining fields need do not exist yet);
// then the empty fixed array, null value and empty descriptor array
// are allocated; finally the partial maps are patched up and the rest
// of the maps can be allocated normally via AllocateMap.  Returns
// false if any allocation fails.
bool Heap::CreateInitialMaps() {
  Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
  if (obj->IsFailure()) return false;

  // Map::cast cannot be used due to uninitialized map field.
  meta_map_ = reinterpret_cast<Map*>(obj);
  meta_map()->set_map(meta_map());  // The meta map is its own map.

  obj = AllocatePartialMap(FIXED_ARRAY_TYPE, Array::kHeaderSize);
  if (obj->IsFailure()) return false;
  fixed_array_map_ = Map::cast(obj);

  obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
  if (obj->IsFailure()) return false;
  oddball_map_ = Map::cast(obj);

  // Allocate the empty array.
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  empty_fixed_array_ = FixedArray::cast(obj);

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  null_value_ = obj;

  // Allocate the empty descriptor array. AllocateMap can now be used.
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  // There is a check against empty_descriptor_array() in cast().
  empty_descriptor_array_ = reinterpret_cast<DescriptorArray*>(obj);

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_instance_descriptors(empty_descriptor_array());
  meta_map()->set_code_cache(empty_fixed_array());

  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
  fixed_array_map()->set_code_cache(empty_fixed_array());

  oddball_map()->set_instance_descriptors(empty_descriptor_array());
  oddball_map()->set_code_cache(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());
  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
  if (obj->IsFailure()) return false;
  heap_number_map_ = Map::cast(obj);

  obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
  if (obj->IsFailure()) return false;
  proxy_map_ = Map::cast(obj);

  // Allocate one map per string type in STRING_TYPE_LIST.
#define ALLOCATE_STRING_MAP(type, size, name) \
  obj = AllocateMap(type, size); \
  if (obj->IsFailure()) return false; \
  name##_map_ = Map::cast(obj);
  STRING_TYPE_LIST(ALLOCATE_STRING_MAP);
#undef ALLOCATE_STRING_MAP

  // Maps for undetectable strings (is_undetectable bit set).
  obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_short_string_map_ = Map::cast(obj);
  undetectable_short_string_map_->set_is_undetectable();

  obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_medium_string_map_ = Map::cast(obj);
  undetectable_medium_string_map_->set_is_undetectable();

  obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_long_string_map_ = Map::cast(obj);
  undetectable_long_string_map_->set_is_undetectable();

  obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_short_ascii_string_map_ = Map::cast(obj);
  undetectable_short_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_medium_ascii_string_map_ = Map::cast(obj);
  undetectable_medium_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
  if (obj->IsFailure()) return false;
  undetectable_long_ascii_string_map_ = Map::cast(obj);
  undetectable_long_ascii_string_map_->set_is_undetectable();

  obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kHeaderSize);
  if (obj->IsFailure()) return false;
  byte_array_map_ = Map::cast(obj);

  obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
  if (obj->IsFailure()) return false;
  code_map_ = Map::cast(obj);

  // Filler maps for one-word and two-word heap holes.
  obj = AllocateMap(FILLER_TYPE, kPointerSize);
  if (obj->IsFailure()) return false;
  one_word_filler_map_ = Map::cast(obj);

  obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
  if (obj->IsFailure()) return false;
  two_word_filler_map_ = Map::cast(obj);

  // Allocate one map per struct type in STRUCT_LIST.
#define ALLOCATE_STRUCT_MAP(NAME, Name, name) \
  obj = AllocateMap(NAME##_TYPE, Name::kSize); \
  if (obj->IsFailure()) return false; \
  name##_map_ = Map::cast(obj);
  STRUCT_LIST(ALLOCATE_STRUCT_MAP)
#undef ALLOCATE_STRUCT_MAP

  // Hash tables and the context kinds share the fixed array layout but
  // get distinct maps so they can be told apart.
  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  hash_table_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  context_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  catch_context_map_ = Map::cast(obj);

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  global_context_map_ = Map::cast(obj);

  obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
  if (obj->IsFailure()) return false;
  boilerplate_function_map_ = Map::cast(obj);

  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
  if (obj->IsFailure()) return false;
  shared_function_info_map_ = Map::cast(obj);

  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
  return true;
}
1065
1066
1067Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
1068 // Statically ensure that it is safe to allocate heap numbers in paged
1069 // spaces.
1070 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001071 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001072 Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001073 if (result->IsFailure()) return result;
1074
1075 HeapObject::cast(result)->set_map(heap_number_map());
1076 HeapNumber::cast(result)->set_value(value);
1077 return result;
1078}
1079
1080
1081Object* Heap::AllocateHeapNumber(double value) {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001082 // Use general version, if we're forced to always allocate.
1083 if (always_allocate()) return AllocateHeapNumber(value, NOT_TENURED);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001084 // This version of AllocateHeapNumber is optimized for
1085 // allocation in new space.
1086 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1087 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001088 Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001089 if (result->IsFailure()) return result;
1090 HeapObject::cast(result)->set_map(heap_number_map());
1091 HeapNumber::cast(result)->set_value(value);
1092 return result;
1093}
1094
1095
1096Object* Heap::CreateOddball(Map* map,
1097 const char* to_string,
1098 Object* to_number) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001099 Object* result = Allocate(map, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001100 if (result->IsFailure()) return result;
1101 return Oddball::cast(result)->Initialize(to_string, to_number);
1102}
1103
1104
1105bool Heap::CreateApiObjects() {
1106 Object* obj;
1107
1108 obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1109 if (obj->IsFailure()) return false;
1110 neander_map_ = Map::cast(obj);
1111
1112 obj = Heap::AllocateJSObjectFromMap(neander_map_);
1113 if (obj->IsFailure()) return false;
1114 Object* elements = AllocateFixedArray(2);
1115 if (elements->IsFailure()) return false;
1116 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1117 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1118 message_listeners_ = JSObject::cast(obj);
1119
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001120 return true;
1121}
1122
1123void Heap::CreateFixedStubs() {
1124 // Here we create roots for fixed stubs. They are needed at GC
1125 // for cooking and uncooking (check out frames.cc).
1126 // The eliminates the need for doing dictionary lookup in the
1127 // stub cache for these stubs.
1128 HandleScope scope;
1129 {
1130 CEntryStub stub;
1131 c_entry_code_ = *stub.GetCode();
1132 }
1133 {
1134 CEntryDebugBreakStub stub;
1135 c_entry_debug_break_code_ = *stub.GetCode();
1136 }
1137 {
1138 JSEntryStub stub;
1139 js_entry_code_ = *stub.GetCode();
1140 }
1141 {
1142 JSConstructEntryStub stub;
1143 js_construct_entry_code_ = *stub.GetCode();
1144 }
1145}
1146
1147
// Allocate and wire up the initial non-map heap objects: canonical
// numbers (-0, NaN), the oddballs, the symbol table, interned symbols,
// caches and the fixed code stubs.  Order matters; returns false if
// any allocation fails.
bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  obj = AllocateHeapNumber(-0.0, TENURED);
  if (obj->IsFailure()) return false;
  minus_zero_value_ = obj;
  ASSERT(signbit(minus_zero_value_->Number()) != 0);

  obj = AllocateHeapNumber(OS::nan_value(), TENURED);
  if (obj->IsFailure()) return false;
  nan_value_ = obj;

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  undefined_value_ = obj;
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate initial symbol table.
  obj = SymbolTable::Allocate(kInitialSymbolTableSize);
  if (obj->IsFailure()) return false;
  symbol_table_ = obj;

  // Assign the print strings for oddballs after creating symboltable.
  Object* symbol = LookupAsciiSymbol("undefined");
  if (symbol->IsFailure()) return false;
  Oddball::cast(undefined_value_)->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value_)->set_to_number(nan_value_);

  // Assign the print strings for oddballs after creating symboltable.
  symbol = LookupAsciiSymbol("null");
  if (symbol->IsFailure()) return false;
  Oddball::cast(null_value_)->set_to_string(String::cast(symbol));
  Oddball::cast(null_value_)->set_to_number(Smi::FromInt(0));

  // Initialize the null value's fields (it was allocated earlier in
  // CreateInitialMaps).
  obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
  if (obj->IsFailure()) return false;

  obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
  if (obj->IsFailure()) return false;
  true_value_ = obj;

  obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
  if (obj->IsFailure()) return false;
  false_value_ = obj;

  obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
  if (obj->IsFailure()) return false;
  the_hole_value_ = obj;

  // Allocate the empty string.
  obj = AllocateRawAsciiString(0, TENURED);
  if (obj->IsFailure()) return false;
  empty_string_ = String::cast(obj);

  // Intern every symbol in SYMBOL_LIST into its root field.
#define SYMBOL_INITIALIZE(name, string) \
  obj = LookupAsciiSymbol(string); \
  if (obj->IsFailure()) return false; \
  (name##_) = String::cast(obj);
  SYMBOL_LIST(SYMBOL_INITIALIZE)
#undef SYMBOL_INITIALIZE

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // SYMBOL_LIST because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
  if (obj->IsFailure()) return false;
  hidden_symbol_ = String::cast(obj);

  // Allocate the proxy for __proto__.
  obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
  if (obj->IsFailure()) return false;
  prototype_accessors_ = Proxy::cast(obj);

  // Allocate the code_stubs dictionary.
  obj = Dictionary::Allocate(4);
  if (obj->IsFailure()) return false;
  code_stubs_ = Dictionary::cast(obj);

  // Allocate the non_monomorphic_cache used in stub-cache.cc
  obj = Dictionary::Allocate(4);
  if (obj->IsFailure()) return false;
  non_monomorphic_cache_ = Dictionary::cast(obj);

  CreateFixedStubs();

  // Allocate the number->string conversion cache (key/value pairs,
  // hence twice the cache size).
  obj = AllocateFixedArray(kNumberStringCacheSize * 2);
  if (obj->IsFailure()) return false;
  number_string_cache_ = FixedArray::cast(obj);

  // Allocate cache for single character strings.
  obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
  if (obj->IsFailure()) return false;
  single_character_string_cache_ = FixedArray::cast(obj);

  // Allocate cache for external strings pointing to native source code.
  obj = AllocateFixedArray(Natives::GetBuiltinsCount());
  if (obj->IsFailure()) return false;
  natives_source_cache_ = FixedArray::cast(obj);

  // Handling of script id generation is in Factory::NewScript.
  last_script_id_ = undefined_value();

  // Initialize keyed lookup cache.
  ClearKeyedLookupCache();

  // Initialize compilation cache.
  CompilationCache::Clear();

  return true;
}
1264
1265
1266static inline int double_get_hash(double d) {
1267 DoubleRepresentation rep(d);
1268 return ((static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) &
1269 (Heap::kNumberStringCacheSize - 1));
1270}
1271
1272
1273static inline int smi_get_hash(Smi* smi) {
1274 return (smi->value() & (Heap::kNumberStringCacheSize - 1));
1275}
1276
1277
1278
1279Object* Heap::GetNumberStringCache(Object* number) {
1280 int hash;
1281 if (number->IsSmi()) {
1282 hash = smi_get_hash(Smi::cast(number));
1283 } else {
1284 hash = double_get_hash(number->Number());
1285 }
1286 Object* key = number_string_cache_->get(hash * 2);
1287 if (key == number) {
1288 return String::cast(number_string_cache_->get(hash * 2 + 1));
1289 } else if (key->IsHeapNumber() &&
1290 number->IsHeapNumber() &&
1291 key->Number() == number->Number()) {
1292 return String::cast(number_string_cache_->get(hash * 2 + 1));
1293 }
1294 return undefined_value();
1295}
1296
1297
1298void Heap::SetNumberStringCache(Object* number, String* string) {
1299 int hash;
1300 if (number->IsSmi()) {
1301 hash = smi_get_hash(Smi::cast(number));
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001302 number_string_cache_->set(hash * 2, number, SKIP_WRITE_BARRIER);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001303 } else {
1304 hash = double_get_hash(number->Number());
1305 number_string_cache_->set(hash * 2, number);
1306 }
1307 number_string_cache_->set(hash * 2 + 1, string);
1308}
1309
1310
1311Object* Heap::SmiOrNumberFromDouble(double value,
1312 bool new_object,
1313 PretenureFlag pretenure) {
1314 // We need to distinguish the minus zero value and this cannot be
1315 // done after conversion to int. Doing this by comparing bit
1316 // patterns is faster than using fpclassify() et al.
1317 static const DoubleRepresentation plus_zero(0.0);
1318 static const DoubleRepresentation minus_zero(-0.0);
1319 static const DoubleRepresentation nan(OS::nan_value());
1320 ASSERT(minus_zero_value_ != NULL);
1321 ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));
1322
1323 DoubleRepresentation rep(value);
1324 if (rep.bits == plus_zero.bits) return Smi::FromInt(0); // not uncommon
1325 if (rep.bits == minus_zero.bits) {
1326 return new_object ? AllocateHeapNumber(-0.0, pretenure)
1327 : minus_zero_value_;
1328 }
1329 if (rep.bits == nan.bits) {
1330 return new_object
1331 ? AllocateHeapNumber(OS::nan_value(), pretenure)
1332 : nan_value_;
1333 }
1334
1335 // Try to represent the value as a tagged small integer.
1336 int int_value = FastD2I(value);
1337 if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
1338 return Smi::FromInt(int_value);
1339 }
1340
1341 // Materialize the value in the heap.
1342 return AllocateHeapNumber(value, pretenure);
1343}
1344
1345
1346Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
1347 return SmiOrNumberFromDouble(value,
1348 true /* number object must be new */,
1349 pretenure);
1350}
1351
1352
1353Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
1354 return SmiOrNumberFromDouble(value,
1355 false /* use preallocated NaN, -0.0 */,
1356 pretenure);
1357}
1358
1359
1360Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
1361 // Statically ensure that it is safe to allocate proxies in paged spaces.
1362 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001363 AllocationSpace space =
1364 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001365 Object* result = Allocate(proxy_map(), space);
1366 if (result->IsFailure()) return result;
1367
1368 Proxy::cast(result)->set_proxy(proxy);
1369 return result;
1370}
1371
1372
1373Object* Heap::AllocateSharedFunctionInfo(Object* name) {
1374 Object* result = Allocate(shared_function_info_map(), NEW_SPACE);
1375 if (result->IsFailure()) return result;
1376
1377 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
1378 share->set_name(name);
1379 Code* illegal = Builtins::builtin(Builtins::Illegal);
1380 share->set_code(illegal);
1381 share->set_expected_nof_properties(0);
1382 share->set_length(0);
1383 share->set_formal_parameter_count(0);
1384 share->set_instance_class_name(Object_symbol());
1385 share->set_function_data(undefined_value());
1386 share->set_lazy_load_data(undefined_value());
1387 share->set_script(undefined_value());
1388 share->set_start_position_and_type(0);
1389 share->set_debug_info(undefined_value());
1390 return result;
1391}
1392
1393
// Concatenate two strings.  Short results are eagerly flattened into a
// sequential string; longer results become a ConsString whose map is
// chosen by length class and by whether both halves are ASCII.
Object* Heap::AllocateConsString(String* first,
                                 String* second) {
  StringShape first_shape(first);
  StringShape second_shape(second);
  int first_length = first->length(first_shape);
  int second_length = second->length(second_shape);
  int length = first_length + second_length;
  // The result is ASCII only if both inputs are ASCII.
  bool is_ascii = first_shape.IsAsciiRepresentation()
      && second_shape.IsAsciiRepresentation();

  // If the resulting string is small make a flat string.
  if (length < String::kMinNonFlatLength) {
    ASSERT(first->IsFlat(first_shape));
    ASSERT(second->IsFlat(second_shape));
    if (is_ascii) {
      Object* result = AllocateRawAsciiString(length);
      if (result->IsFailure()) return result;
      // Copy the characters into the new object.
      char* dest = SeqAsciiString::cast(result)->GetChars();
      String::WriteToFlat(first, first_shape, dest, 0, first_length);
      // The second half is written immediately after the first.
      String::WriteToFlat(second,
                          second_shape,
                          dest + first_length,
                          0,
                          second_length);
      return result;
    } else {
      Object* result = AllocateRawTwoByteString(length);
      if (result->IsFailure()) return result;
      // Copy the characters into the new object.
      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
      String::WriteToFlat(first, first_shape, dest, 0, first_length);
      String::WriteToFlat(second,
                          second_shape,
                          dest + first_length,
                          0,
                          second_length);
      return result;
    }
  }

  // Pick the cons-string map matching the result's length class and
  // character width.
  Map* map;
  if (length <= String::kMaxShortStringSize) {
    map = is_ascii ? short_cons_ascii_string_map()
      : short_cons_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = is_ascii ? medium_cons_ascii_string_map()
      : medium_cons_string_map();
  } else {
    map = is_ascii ? long_cons_ascii_string_map()
      : long_cons_string_map();
  }

  Object* result = Allocate(map, NEW_SPACE);
  if (result->IsFailure()) return result;
  // The cons string was just allocated in new space, so storing the
  // children needs no write barrier.
  ASSERT(InNewSpace(result));
  ConsString* cons_string = ConsString::cast(result);
  cons_string->set_first(first, SKIP_WRITE_BARRIER);
  cons_string->set_second(second, SKIP_WRITE_BARRIER);
  cons_string->set_length(length);
  return result;
}
1456
1457
// Creates a string view over buffer[start..end). Short slices are
// materialized as a flat copy via AllocateSubString; longer ones get a
// SlicedString object that shares the backing buffer.
Object* Heap::AllocateSlicedString(String* buffer,
                                   int start,
                                   int end) {
  StringShape buffer_shape(buffer);
  int length = end - start;

  // If the resulting string is small make a sub string.
  if (end - start <= String::kMinNonFlatLength) {
    return Heap::AllocateSubString(buffer, buffer_shape, start, end);
  }

  // Select the sliced-string map for the slice's length class and the
  // buffer's character width.
  Map* map;
  if (length <= String::kMaxShortStringSize) {
    map = buffer_shape.IsAsciiRepresentation() ?
      short_sliced_ascii_string_map() :
      short_sliced_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = buffer_shape.IsAsciiRepresentation() ?
      medium_sliced_ascii_string_map() :
      medium_sliced_string_map();
  } else {
    map = buffer_shape.IsAsciiRepresentation() ?
      long_sliced_ascii_string_map() :
      long_sliced_string_map();
  }

  Object* result = Allocate(map, NEW_SPACE);
  if (result->IsFailure()) return result;

  SlicedString* sliced_string = SlicedString::cast(result);
  sliced_string->set_buffer(buffer);
  sliced_string->set_start(start);
  sliced_string->set_length(length);

  return result;
}
1494
1495
// Allocates a fresh flat copy of buffer[start..end). Single-character
// results come from the one-character string cache. The new string's
// hash/length field is computed on the fly while copying.
Object* Heap::AllocateSubString(String* buffer,
                                StringShape buffer_shape,
                                int start,
                                int end) {
  int length = end - start;

  if (length == 1) {
    return Heap::LookupSingleCharacterStringFromCode(
        buffer->Get(buffer_shape, start));
  }

  // Make an attempt to flatten the buffer to reduce access time.
  if (!buffer->IsFlat(buffer_shape)) {
    buffer->TryFlatten(buffer_shape);
    // Flattening may change the buffer's representation, so the shape
    // must be re-read before it is used again below.
    buffer_shape = StringShape(buffer);
  }

  Object* result = buffer_shape.IsAsciiRepresentation()
      ? AllocateRawAsciiString(length)
      : AllocateRawTwoByteString(length);
  if (result->IsFailure()) return result;

  // Copy the characters into the new object.
  String* string_result = String::cast(result);
  StringShape result_shape(string_result);
  StringHasher hasher(length);
  int i = 0;
  // While the prefix could still form an array index, feed the hasher
  // through the index-aware path.
  for (; i < length && hasher.is_array_index(); i++) {
    uc32 c = buffer->Get(buffer_shape, start + i);
    hasher.AddCharacter(c);
    string_result->Set(result_shape, i, c);
  }
  // Once the string can no longer be an array index, switch to the
  // cheaper non-index hashing path for the remainder.
  for (; i < length; i++) {
    uc32 c = buffer->Get(buffer_shape, start + i);
    hasher.AddCharacterNoIndex(c);
    string_result->Set(result_shape, i, c);
  }
  // Store the precomputed hash into the combined length/hash field.
  string_result->set_length_field(hasher.GetHashField());
  return result;
}
1536
1537
1538Object* Heap::AllocateExternalStringFromAscii(
1539 ExternalAsciiString::Resource* resource) {
1540 Map* map;
1541 int length = resource->length();
1542 if (length <= String::kMaxShortStringSize) {
1543 map = short_external_ascii_string_map();
1544 } else if (length <= String::kMaxMediumStringSize) {
1545 map = medium_external_ascii_string_map();
1546 } else {
1547 map = long_external_ascii_string_map();
1548 }
1549
1550 Object* result = Allocate(map, NEW_SPACE);
1551 if (result->IsFailure()) return result;
1552
1553 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
1554 external_string->set_length(length);
1555 external_string->set_resource(resource);
1556
1557 return result;
1558}
1559
1560
1561Object* Heap::AllocateExternalStringFromTwoByte(
1562 ExternalTwoByteString::Resource* resource) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001563 int length = resource->length();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001564
ager@chromium.org6f10e412009-02-13 10:11:16 +00001565 Map* map = ExternalTwoByteString::StringMap(length);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001566 Object* result = Allocate(map, NEW_SPACE);
1567 if (result->IsFailure()) return result;
1568
1569 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
1570 external_string->set_length(length);
1571 external_string->set_resource(resource);
1572
1573 return result;
1574}
1575
1576
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001577Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001578 if (code <= String::kMaxAsciiCharCode) {
1579 Object* value = Heap::single_character_string_cache()->get(code);
1580 if (value != Heap::undefined_value()) return value;
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001581
1582 char buffer[1];
1583 buffer[0] = static_cast<char>(code);
1584 Object* result = LookupSymbol(Vector<const char>(buffer, 1));
1585
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001586 if (result->IsFailure()) return result;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001587 Heap::single_character_string_cache()->set(code, result);
1588 return result;
1589 }
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001590
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001591 Object* result = Heap::AllocateRawTwoByteString(1);
1592 if (result->IsFailure()) return result;
ager@chromium.org870a0b62008-11-04 11:43:05 +00001593 String* answer = String::cast(result);
1594 answer->Set(StringShape(answer), 0, code);
1595 return answer;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001596}
1597
1598
ager@chromium.orga74f0da2008-12-03 16:05:52 +00001599Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
1600 if (pretenure == NOT_TENURED) {
1601 return AllocateByteArray(length);
1602 }
1603 int size = ByteArray::SizeFor(length);
1604 AllocationSpace space =
1605 size > MaxHeapObjectSize() ? LO_SPACE : OLD_DATA_SPACE;
1606
1607 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
1608
1609 if (result->IsFailure()) return result;
1610
1611 reinterpret_cast<Array*>(result)->set_map(byte_array_map());
1612 reinterpret_cast<Array*>(result)->set_length(length);
1613 return result;
1614}
1615
1616
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001617Object* Heap::AllocateByteArray(int length) {
1618 int size = ByteArray::SizeFor(length);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001619 AllocationSpace space =
1620 size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001621
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001622 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001623
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001624 if (result->IsFailure()) return result;
1625
1626 reinterpret_cast<Array*>(result)->set_map(byte_array_map());
1627 reinterpret_cast<Array*>(result)->set_length(length);
1628 return result;
1629}
1630
1631
ager@chromium.org6f10e412009-02-13 10:11:16 +00001632void Heap::CreateFillerObjectAt(Address addr, int size) {
1633 if (size == 0) return;
1634 HeapObject* filler = HeapObject::FromAddress(addr);
1635 if (size == kPointerSize) {
1636 filler->set_map(Heap::one_word_filler_map());
1637 } else {
1638 filler->set_map(Heap::byte_array_map());
1639 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
1640 }
1641}
1642
1643
// Allocates and initializes a Code object from the assembler output in
// |desc|, optionally serializing |sinfo| into it. |self_reference|,
// when non-null, is a handle that is patched to point at the new Code
// object *before* the instructions are copied, so generated code can
// legally refer to itself.
Object* Heap::CreateCode(const CodeDesc& desc,
                         ScopeInfo<>* sinfo,
                         Code::Flags flags,
                         Handle<Object> self_reference) {
  // Compute size
  int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment);
  int sinfo_size = 0;
  // First Serialize call with NULL only measures the size needed.
  if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
  int obj_size = Code::SizeFor(body_size, sinfo_size);
  ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
  Object* result;
  // Code must live in an executable space: either code space or the
  // code path of large-object space.
  if (obj_size > MaxHeapObjectSize()) {
    result = lo_space_->AllocateRawCode(obj_size);
  } else {
    result = code_space_->AllocateRaw(obj_size);
  }

  if (result->IsFailure()) return result;

  // Initialize the object
  HeapObject::cast(result)->set_map(code_map());
  Code* code = Code::cast(result);
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_size(desc.reloc_size);
  code->set_sinfo_size(sinfo_size);
  code->set_flags(flags);
  code->set_ic_flag(Code::IC_TARGET_IS_ADDRESS);
  // Allow self references to created code object by patching the handle to
  // point to the newly allocated Code object.
  if (!self_reference.is_null()) {
    *(self_reference.location()) = code;
  }
  // Migrate generated code.
  // The generated code can contain Object** values (typically from handles)
  // that are dereferenced during the copy to point directly to the actual heap
  // objects. These pointers can include references to the code object itself,
  // through the self_reference parameter.
  code->CopyFrom(desc);
  if (sinfo != NULL) sinfo->Serialize(code);  // write scope info
  LOG(CodeAllocateEvent(code, desc.origin));

#ifdef DEBUG
  code->Verify();
#endif
  return code;
}
1690
1691
// Makes a byte-for-byte copy of |code| in executable memory and then
// fixes up the copy for its new address.
Object* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  Object* result;
  if (obj_size > MaxHeapObjectSize()) {
    result = lo_space_->AllocateRawCode(obj_size);
  } else {
    result = code_space_->AllocateRaw(obj_size);
  }

  if (result->IsFailure()) return result;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  CopyBlock(reinterpret_cast<Object**>(new_addr),
            reinterpret_cast<Object**>(old_addr),
            obj_size);
  // Relocate the copy.
  // Adjusts address-dependent data in the copied code by the distance
  // it moved.
  Code* new_code = Code::cast(result);
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}
1715
1716
1717Object* Heap::Allocate(Map* map, AllocationSpace space) {
1718 ASSERT(gc_state_ == NOT_IN_GC);
1719 ASSERT(map->instance_type() != MAP_TYPE);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001720 Object* result = AllocateRaw(map->instance_size(),
1721 space,
1722 TargetSpaceId(map->instance_type()));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001723 if (result->IsFailure()) return result;
1724 HeapObject::cast(result)->set_map(map);
1725 return result;
1726}
1727
1728
// Fills in the fields of a freshly allocated JSFunction. Returns the
// function (never a Failure) so callers can propagate it directly.
Object* Heap::InitializeFunction(JSFunction* function,
                                 SharedFunctionInfo* shared,
                                 Object* prototype) {
  // |prototype| is either a real prototype object or the initial map.
  ASSERT(!prototype->IsMap());
  function->initialize_properties();
  function->initialize_elements();
  function->set_shared(shared);
  function->set_prototype_or_initial_map(prototype);
  function->set_context(undefined_value());
  // NOTE(review): the write barrier is skipped here, presumably
  // because the shared empty fixed array never lives in new space —
  // confirm against the heap setup code.
  function->set_literals(empty_fixed_array(), SKIP_WRITE_BARRIER);
  return function;
}
1741
1742
1743Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
ager@chromium.orgddb913d2009-01-27 10:01:48 +00001744 // Allocate the prototype. Make sure to use the object function
1745 // from the function's context, since the function can be from a
1746 // different context.
1747 JSFunction* object_function =
1748 function->context()->global_context()->object_function();
1749 Object* prototype = AllocateJSObject(object_function);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001750 if (prototype->IsFailure()) return prototype;
1751 // When creating the prototype for the function we must set its
1752 // constructor to the function.
1753 Object* result =
1754 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
1755 function,
1756 DONT_ENUM);
1757 if (result->IsFailure()) return result;
1758 return prototype;
1759}
1760
1761
1762Object* Heap::AllocateFunction(Map* function_map,
1763 SharedFunctionInfo* shared,
1764 Object* prototype) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001765 Object* result = Allocate(function_map, OLD_POINTER_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001766 if (result->IsFailure()) return result;
1767 return InitializeFunction(JSFunction::cast(result), shared, prototype);
1768}
1769
1770
// Creates an arguments object for |callee| with the given |length| by
// cloning the per-context arguments boilerplate and overwriting its
// two in-object properties.
Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
  // To get fast allocation and map sharing for arguments objects we
  // allocate them based on an arguments boilerplate.

  // This calls Copy directly rather than using Heap::AllocateRaw so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  JSObject* boilerplate =
      Top::context()->global_context()->arguments_boilerplate();

  // Make the clone.
  Map* map = boilerplate->map();
  int object_size = map->instance_size();
  Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
  if (result->IsFailure()) return result;

  // Copy the content. The arguments boilerplate doesn't have any
  // fields that point to new space so it's safe to skip the write
  // barrier here.
  CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
            reinterpret_cast<Object**>(boilerplate->address()),
            object_size);

  // Set the two properties.
  // The callee may be any heap object, so this store keeps the write
  // barrier; the length is a smi and therefore needs none.
  JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
                                                callee);
  JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
                                                Smi::FromInt(length),
                                                SKIP_WRITE_BARRIER);

  // Check the state of the object
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastElements());

  return result;
}
1808
1809
// Creates the initial map for instances of |fun|, sizing instances so
// the expected number of properties fits in-object (capped at the
// maximum instance size).
Object* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map with the expected number of properties being
  // allocated in-object.
  int expected_nof_properties = fun->shared()->expected_nof_properties();
  int instance_size = JSObject::kHeaderSize +
                      expected_nof_properties * kPointerSize;
  if (instance_size > JSObject::kMaxInstanceSize) {
    // Clamp to the maximum and recompute how many properties still fit
    // in-object at that size.
    instance_size = JSObject::kMaxInstanceSize;
    expected_nof_properties = (instance_size - JSObject::kHeaderSize) /
                              kPointerSize;
  }
  Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
  if (map_obj->IsFailure()) return map_obj;

  // Fetch or allocate prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    prototype = AllocateFunctionPrototype(fun);
    if (prototype->IsFailure()) return prototype;
  }
  Map* map = Map::cast(map_obj);
  map->set_inobject_properties(expected_nof_properties);
  map->set_unused_property_fields(expected_nof_properties);
  map->set_prototype(prototype);
  return map;
}
1840
1841
// Initializes a freshly allocated JSObject: installs the property
// backing store, the elements array, and zero-fills the in-object
// fields according to |map|'s instance size.
void Heap::InitializeJSObjectFromMap(JSObject* obj,
                                     FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map. For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
  // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
  // verification code has to cope with (temporarily) invalid objects. See
  // for example, JSArray::JSArrayVerify).
  obj->InitializeBody(map->instance_size());
}
1856
1857
// Allocates a JSObject shaped by |map|, including its out-of-object
// property backing store. |pretenure| selects new vs. old pointer
// space; oversized instances always go to large-object space.
Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Allocate the backing storage for the properties.
  // NOTE(review): this assumes unused_property_fields >=
  // inobject_properties so prop_size is non-negative — confirm against
  // AllocateInitialMap, which sets both to the same value.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties = AllocateFixedArray(prop_size);
  if (properties->IsFailure()) return properties;

  // Allocate the JSObject.
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE;
  Object* obj = Allocate(map, space);
  if (obj->IsFailure()) return obj;

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  return obj;
}
1881
1882
1883Object* Heap::AllocateJSObject(JSFunction* constructor,
1884 PretenureFlag pretenure) {
1885 // Allocate the initial map if absent.
1886 if (!constructor->has_initial_map()) {
1887 Object* initial_map = AllocateInitialMap(constructor);
1888 if (initial_map->IsFailure()) return initial_map;
1889 constructor->set_initial_map(Map::cast(initial_map));
1890 Map::cast(initial_map)->set_constructor(constructor);
1891 }
1892 // Allocate the object based on the constructors initial map.
1893 return AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
1894}
1895
1896
// Makes a shallow clone of |source|, then deep-copies its elements and
// properties backing stores. Must not be used on functions (their
// literals array would need clearing).
Object* Heap::CopyJSObject(JSObject* source) {
  // Never used to copy functions. If functions need to be copied we
  // have to be careful to clear the literals array.
  ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  if (always_allocate()) {
    clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
    if (clone->IsFailure()) return clone;
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(reinterpret_cast<Object**>(clone_address),
              reinterpret_cast<Object**>(source->address()),
              object_size);
    // Update write barrier for all fields that lie beyond the header.
    // The raw block copy bypassed the barrier, so every pointer field
    // must be recorded manually in case the clone landed in old space.
    for (int offset = JSObject::kHeaderSize;
         offset < object_size;
         offset += kPointerSize) {
      RecordWrite(clone_address, offset);
    }
  } else {
    clone = new_space_.AllocateRaw(object_size);
    if (clone->IsFailure()) return clone;
    ASSERT(Heap::InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
              reinterpret_cast<Object**>(source->address()),
              object_size);
  }

  FixedArray* elements = FixedArray::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length()> 0) {
    Object* elem = CopyFixedArray(elements);
    if (elem->IsFailure()) return elem;
    JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop = CopyFixedArray(properties);
    if (prop->IsFailure()) return prop;
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
  }
  // Return the new clone.
  return clone;
}
1950
1951
// Re-shapes the existing global proxy |object| as an instance of
// |constructor| in place — no new proxy object is allocated, so
// references to the proxy stay valid.
Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                        JSGlobalProxy* object) {
  // Allocate initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map = AllocateInitialMap(constructor);
    if (initial_map->IsFailure()) return initial_map;
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }

  Map* map = constructor->initial_map();

  // Check that the already allocated object has the same size as
  // objects allocated using the constructor.
  ASSERT(map->instance_size() == object->map()->instance_size());

  // Allocate the backing storage for the properties.
  // NOTE(review): same prop_size formula as AllocateJSObjectFromMap;
  // assumes unused_property_fields >= inobject_properties — confirm.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties = AllocateFixedArray(prop_size);
  if (properties->IsFailure()) return properties;

  // Reset the map for the object.
  object->set_map(constructor->initial_map());

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
  return object;
}
1980
1981
1982Object* Heap::AllocateStringFromAscii(Vector<const char> string,
1983 PretenureFlag pretenure) {
1984 Object* result = AllocateRawAsciiString(string.length(), pretenure);
1985 if (result->IsFailure()) return result;
1986
1987 // Copy the characters into the new object.
ager@chromium.org7c537e22008-10-16 08:43:32 +00001988 SeqAsciiString* string_result = SeqAsciiString::cast(result);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001989 for (int i = 0; i < string.length(); i++) {
ager@chromium.org7c537e22008-10-16 08:43:32 +00001990 string_result->SeqAsciiStringSet(i, string[i]);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001991 }
1992 return result;
1993}
1994
1995
// Allocates a string from UTF-8 input. The decoder is run twice: a
// first pass counts characters and detects pure-ASCII input (which can
// share bytes with UTF-8), a second pass copies the decoded code
// points.
Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
                                     PretenureFlag pretenure) {
  // Count the number of characters in the UTF-8 string and check if
  // it is an ASCII string.
  Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
  decoder->Reset(string.start(), string.length());
  int chars = 0;
  bool is_ascii = true;
  while (decoder->has_more()) {
    uc32 r = decoder->GetNext();
    if (r > String::kMaxAsciiCharCode) is_ascii = false;
    chars++;
  }

  // If the string is ascii, we do not need to convert the characters
  // since UTF8 is backwards compatible with ascii.
  if (is_ascii) return AllocateStringFromAscii(string, pretenure);

  Object* result = AllocateRawTwoByteString(chars, pretenure);
  if (result->IsFailure()) return result;

  // Convert and copy the characters into the new object.
  String* string_result = String::cast(result);
  // Rewind the shared decoder for the second pass.
  decoder->Reset(string.start(), string.length());
  StringShape result_shape(string_result);
  for (int i = 0; i < chars; i++) {
    uc32 r = decoder->GetNext();
    string_result->Set(result_shape, i, r);
  }
  return result;
}
2027
2028
2029Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
2030 PretenureFlag pretenure) {
2031 // Check if the string is an ASCII string.
2032 int i = 0;
2033 while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
2034
2035 Object* result;
2036 if (i == string.length()) { // It's an ASCII string.
2037 result = AllocateRawAsciiString(string.length(), pretenure);
2038 } else { // It's not an ASCII string.
2039 result = AllocateRawTwoByteString(string.length(), pretenure);
2040 }
2041 if (result->IsFailure()) return result;
2042
2043 // Copy the characters into the new object, which may be either ASCII or
2044 // UTF-16.
2045 String* string_result = String::cast(result);
ager@chromium.org870a0b62008-11-04 11:43:05 +00002046 StringShape result_shape(string_result);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002047 for (int i = 0; i < string.length(); i++) {
ager@chromium.org870a0b62008-11-04 11:43:05 +00002048 string_result->Set(result_shape, i, string[i]);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002049 }
2050 return result;
2051}
2052
2053
2054Map* Heap::SymbolMapForString(String* string) {
2055 // If the string is in new space it cannot be used as a symbol.
2056 if (InNewSpace(string)) return NULL;
2057
2058 // Find the corresponding symbol map for strings.
2059 Map* map = string->map();
2060
2061 if (map == short_ascii_string_map()) return short_ascii_symbol_map();
2062 if (map == medium_ascii_string_map()) return medium_ascii_symbol_map();
2063 if (map == long_ascii_string_map()) return long_ascii_symbol_map();
2064
2065 if (map == short_string_map()) return short_symbol_map();
2066 if (map == medium_string_map()) return medium_symbol_map();
2067 if (map == long_string_map()) return long_symbol_map();
2068
2069 if (map == short_cons_string_map()) return short_cons_symbol_map();
2070 if (map == medium_cons_string_map()) return medium_cons_symbol_map();
2071 if (map == long_cons_string_map()) return long_cons_symbol_map();
2072
2073 if (map == short_cons_ascii_string_map()) {
2074 return short_cons_ascii_symbol_map();
2075 }
2076 if (map == medium_cons_ascii_string_map()) {
2077 return medium_cons_ascii_symbol_map();
2078 }
2079 if (map == long_cons_ascii_string_map()) {
2080 return long_cons_ascii_symbol_map();
2081 }
2082
2083 if (map == short_sliced_string_map()) return short_sliced_symbol_map();
kasperl@chromium.org9fe21c62008-10-28 08:53:51 +00002084 if (map == medium_sliced_string_map()) return medium_sliced_symbol_map();
2085 if (map == long_sliced_string_map()) return long_sliced_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002086
2087 if (map == short_sliced_ascii_string_map()) {
2088 return short_sliced_ascii_symbol_map();
2089 }
2090 if (map == medium_sliced_ascii_string_map()) {
kasperl@chromium.org9fe21c62008-10-28 08:53:51 +00002091 return medium_sliced_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002092 }
2093 if (map == long_sliced_ascii_string_map()) {
kasperl@chromium.org9fe21c62008-10-28 08:53:51 +00002094 return long_sliced_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002095 }
2096
ager@chromium.org6f10e412009-02-13 10:11:16 +00002097 if (map == short_external_string_map()) {
2098 return short_external_symbol_map();
2099 }
2100 if (map == medium_external_string_map()) {
2101 return medium_external_symbol_map();
2102 }
2103 if (map == long_external_string_map()) {
2104 return long_external_symbol_map();
2105 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002106
2107 if (map == short_external_ascii_string_map()) {
ager@chromium.org6f10e412009-02-13 10:11:16 +00002108 return short_external_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002109 }
2110 if (map == medium_external_ascii_string_map()) {
ager@chromium.org6f10e412009-02-13 10:11:16 +00002111 return medium_external_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002112 }
2113 if (map == long_external_ascii_string_map()) {
ager@chromium.org6f10e412009-02-13 10:11:16 +00002114 return long_external_ascii_symbol_map();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002115 }
2116
2117 // No match found.
2118 return NULL;
2119}
2120
2121
ager@chromium.orga74f0da2008-12-03 16:05:52 +00002122Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
2123 int chars,
2124 uint32_t length_field) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002125 // Ensure the chars matches the number of characters in the buffer.
2126 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
2127 // Determine whether the string is ascii.
2128 bool is_ascii = true;
ager@chromium.org6f10e412009-02-13 10:11:16 +00002129 while (buffer->has_more() && is_ascii) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002130 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) is_ascii = false;
2131 }
2132 buffer->Rewind();
2133
2134 // Compute map and object size.
2135 int size;
2136 Map* map;
2137
2138 if (is_ascii) {
2139 if (chars <= String::kMaxShortStringSize) {
2140 map = short_ascii_symbol_map();
2141 } else if (chars <= String::kMaxMediumStringSize) {
2142 map = medium_ascii_symbol_map();
2143 } else {
2144 map = long_ascii_symbol_map();
2145 }
ager@chromium.org7c537e22008-10-16 08:43:32 +00002146 size = SeqAsciiString::SizeFor(chars);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002147 } else {
2148 if (chars <= String::kMaxShortStringSize) {
2149 map = short_symbol_map();
2150 } else if (chars <= String::kMaxMediumStringSize) {
2151 map = medium_symbol_map();
2152 } else {
2153 map = long_symbol_map();
2154 }
ager@chromium.org7c537e22008-10-16 08:43:32 +00002155 size = SeqTwoByteString::SizeFor(chars);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002156 }
2157
2158 // Allocate string.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002159 AllocationSpace space =
2160 (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002161 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002162 if (result->IsFailure()) return result;
2163
2164 reinterpret_cast<HeapObject*>(result)->set_map(map);
2165 // The hash value contains the length of the string.
ager@chromium.org870a0b62008-11-04 11:43:05 +00002166 String* answer = String::cast(result);
2167 StringShape answer_shape(answer);
2168 answer->set_length_field(length_field);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002169
ager@chromium.org870a0b62008-11-04 11:43:05 +00002170 ASSERT_EQ(size, answer->Size());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002171
2172 // Fill in the characters.
2173 for (int i = 0; i < chars; i++) {
ager@chromium.org870a0b62008-11-04 11:43:05 +00002174 answer->Set(answer_shape, i, buffer->GetNext());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002175 }
ager@chromium.org870a0b62008-11-04 11:43:05 +00002176 return answer;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002177}
2178
2179
2180Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002181 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
ager@chromium.org7c537e22008-10-16 08:43:32 +00002182 int size = SeqAsciiString::SizeFor(length);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002183 if (size > MaxHeapObjectSize()) {
2184 space = LO_SPACE;
2185 }
2186
2187 // Use AllocateRaw rather than Allocate because the object's size cannot be
2188 // determined from the map.
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002189 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002190 if (result->IsFailure()) return result;
2191
2192 // Determine the map based on the string's length.
2193 Map* map;
2194 if (length <= String::kMaxShortStringSize) {
2195 map = short_ascii_string_map();
2196 } else if (length <= String::kMaxMediumStringSize) {
2197 map = medium_ascii_string_map();
2198 } else {
2199 map = long_ascii_string_map();
2200 }
2201
2202 // Partially initialize the object.
2203 HeapObject::cast(result)->set_map(map);
2204 String::cast(result)->set_length(length);
2205 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2206 return result;
2207}
2208
2209
2210Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002211 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
ager@chromium.org7c537e22008-10-16 08:43:32 +00002212 int size = SeqTwoByteString::SizeFor(length);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002213 if (size > MaxHeapObjectSize()) {
2214 space = LO_SPACE;
2215 }
2216
2217 // Use AllocateRaw rather than Allocate because the object's size cannot be
2218 // determined from the map.
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002219 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002220 if (result->IsFailure()) return result;
2221
2222 // Determine the map based on the string's length.
2223 Map* map;
2224 if (length <= String::kMaxShortStringSize) {
2225 map = short_string_map();
2226 } else if (length <= String::kMaxMediumStringSize) {
2227 map = medium_string_map();
2228 } else {
2229 map = long_string_map();
2230 }
2231
2232 // Partially initialize the object.
2233 HeapObject::cast(result)->set_map(map);
2234 String::cast(result)->set_length(length);
2235 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2236 return result;
2237}
2238
2239
2240Object* Heap::AllocateEmptyFixedArray() {
2241 int size = FixedArray::SizeFor(0);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002242 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002243 if (result->IsFailure()) return result;
2244 // Initialize the object.
2245 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2246 reinterpret_cast<Array*>(result)->set_length(0);
2247 return result;
2248}
2249
2250
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002251Object* Heap::AllocateRawFixedArray(int length) {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002252 // Use the general function if we're forced to always allocate.
2253 if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002254 // Allocate the raw data for a fixed array.
2255 int size = FixedArray::SizeFor(length);
2256 return (size > MaxHeapObjectSize())
2257 ? lo_space_->AllocateRawFixedArray(size)
2258 : new_space_.AllocateRaw(size);
2259}
2260
2261
2262Object* Heap::CopyFixedArray(FixedArray* src) {
2263 int len = src->length();
2264 Object* obj = AllocateRawFixedArray(len);
2265 if (obj->IsFailure()) return obj;
2266 if (Heap::InNewSpace(obj)) {
2267 HeapObject* dst = HeapObject::cast(obj);
2268 CopyBlock(reinterpret_cast<Object**>(dst->address()),
2269 reinterpret_cast<Object**>(src->address()),
2270 FixedArray::SizeFor(len));
2271 return obj;
2272 }
2273 HeapObject::cast(obj)->set_map(src->map());
2274 FixedArray* result = FixedArray::cast(obj);
2275 result->set_length(len);
2276 // Copy the content
2277 WriteBarrierMode mode = result->GetWriteBarrierMode();
2278 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
2279 return result;
2280}
2281
2282
2283Object* Heap::AllocateFixedArray(int length) {
ager@chromium.org32912102009-01-16 10:38:43 +00002284 if (length == 0) return empty_fixed_array();
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002285 Object* result = AllocateRawFixedArray(length);
2286 if (!result->IsFailure()) {
2287 // Initialize header.
2288 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2289 FixedArray* array = FixedArray::cast(result);
2290 array->set_length(length);
2291 Object* value = undefined_value();
2292 // Initialize body.
2293 for (int index = 0; index < length; index++) {
2294 array->set(index, value, SKIP_WRITE_BARRIER);
2295 }
2296 }
2297 return result;
2298}
2299
2300
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002301Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
2302 ASSERT(empty_fixed_array()->IsFixedArray());
2303 if (length == 0) return empty_fixed_array();
2304
2305 int size = FixedArray::SizeFor(length);
2306 Object* result;
2307 if (size > MaxHeapObjectSize()) {
2308 result = lo_space_->AllocateRawFixedArray(size);
2309 } else {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002310 AllocationSpace space =
2311 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002312 result = AllocateRaw(size, space, OLD_POINTER_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002313 }
2314 if (result->IsFailure()) return result;
2315
2316 // Initialize the object.
2317 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2318 FixedArray* array = FixedArray::cast(result);
2319 array->set_length(length);
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002320 Object* value = undefined_value();
2321 for (int index = 0; index < length; index++) {
2322 array->set(index, value, SKIP_WRITE_BARRIER);
2323 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002324 return array;
2325}
2326
2327
2328Object* Heap::AllocateFixedArrayWithHoles(int length) {
2329 if (length == 0) return empty_fixed_array();
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002330 Object* result = AllocateRawFixedArray(length);
2331 if (!result->IsFailure()) {
2332 // Initialize header.
2333 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2334 FixedArray* array = FixedArray::cast(result);
2335 array->set_length(length);
2336 // Initialize body.
2337 Object* value = the_hole_value();
2338 for (int index = 0; index < length; index++) {
2339 array->set(index, value, SKIP_WRITE_BARRIER);
2340 }
2341 }
2342 return result;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002343}
2344
2345
2346Object* Heap::AllocateHashTable(int length) {
2347 Object* result = Heap::AllocateFixedArray(length);
2348 if (result->IsFailure()) return result;
2349 reinterpret_cast<Array*>(result)->set_map(hash_table_map());
2350 ASSERT(result->IsDictionary());
2351 return result;
2352}
2353
2354
2355Object* Heap::AllocateGlobalContext() {
2356 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
2357 if (result->IsFailure()) return result;
2358 Context* context = reinterpret_cast<Context*>(result);
2359 context->set_map(global_context_map());
2360 ASSERT(context->IsGlobalContext());
2361 ASSERT(result->IsContext());
2362 return result;
2363}
2364
2365
2366Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
2367 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
2368 Object* result = Heap::AllocateFixedArray(length);
2369 if (result->IsFailure()) return result;
2370 Context* context = reinterpret_cast<Context*>(result);
2371 context->set_map(context_map());
2372 context->set_closure(function);
2373 context->set_fcontext(context);
2374 context->set_previous(NULL);
2375 context->set_extension(NULL);
2376 context->set_global(function->context()->global());
2377 ASSERT(!context->IsGlobalContext());
2378 ASSERT(context->is_function_context());
2379 ASSERT(result->IsContext());
2380 return result;
2381}
2382
2383
christian.plesner.hansen@gmail.com37abdec2009-01-06 14:43:28 +00002384Object* Heap::AllocateWithContext(Context* previous,
2385 JSObject* extension,
2386 bool is_catch_context) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002387 Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
2388 if (result->IsFailure()) return result;
2389 Context* context = reinterpret_cast<Context*>(result);
christian.plesner.hansen@gmail.com37abdec2009-01-06 14:43:28 +00002390 context->set_map(is_catch_context ? catch_context_map() : context_map());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002391 context->set_closure(previous->closure());
2392 context->set_fcontext(previous->fcontext());
2393 context->set_previous(previous);
2394 context->set_extension(extension);
2395 context->set_global(previous->global());
2396 ASSERT(!context->IsGlobalContext());
2397 ASSERT(!context->is_function_context());
2398 ASSERT(result->IsContext());
2399 return result;
2400}
2401
2402
// Allocate a struct of the given instance type, selecting its map via the
// STRUCT_LIST table and zero-initializing its body.
Object* Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return Failure::InternalError();
  }
  int size = map->instance_size();
  // Structs contain pointers, so large ones go to LO_SPACE and the rest to
  // old pointer space.
  AllocationSpace space =
      (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_POINTER_SPACE;
  Object* result = Heap::Allocate(map, space);
  if (result->IsFailure()) return result;
  Struct::cast(result)->InitializeBody(size);
  return result;
}
2421
2422
2423#ifdef DEBUG
2424
2425void Heap::Print() {
2426 if (!HasBeenSetup()) return;
2427 Top::PrintStack();
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002428 AllSpaces spaces;
2429 while (Space* space = spaces.next()) space->Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002430}
2431
2432
// Debug helper: collect and print statistics about code objects, prefixed
// with the given title.
void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics();
  // We do not look for code in new space, map space, or old space. If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics();
}
2442
2443
// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
         title, gc_count_);
  PrintF("mark-compact GC : %d\n", mc_count_);
  PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
  PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
  GlobalHandles::PrintStats();
  PrintF("\n");

  // Per-space statistics, one line of heading per space.
  PrintF("Heap statistics : ");
  MemoryAllocator::ReportStatistics();
  PrintF("To space : ");
  new_space_.ReportStatistics();
  PrintF("Old pointer space : ");
  old_pointer_space_->ReportStatistics();
  PrintF("Old data space : ");
  old_data_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}
2476
2477#endif // DEBUG
2478
// Returns true if the object's address lies in any heap space.
bool Heap::Contains(HeapObject* value) {
  return Contains(value->address());
}
2482
2483
2484bool Heap::Contains(Address addr) {
2485 if (OS::IsOutsideAllocatedSpace(addr)) return false;
2486 return HasBeenSetup() &&
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002487 (new_space_.ToSpaceContains(addr) ||
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002488 old_pointer_space_->Contains(addr) ||
2489 old_data_space_->Contains(addr) ||
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002490 code_space_->Contains(addr) ||
2491 map_space_->Contains(addr) ||
2492 lo_space_->SlowContains(addr));
2493}
2494
2495
// Returns true if the object's address lies in the given space.
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  return InSpace(value->address(), space);
}
2499
2500
// Returns true if the address lies in the given allocation space.
bool Heap::InSpace(Address addr, AllocationSpace space) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  if (!HasBeenSetup()) return false;

  switch (space) {
    case NEW_SPACE:
      // Only the to-space half of new space counts as live.
      return new_space_.ToSpaceContains(addr);
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Contains(addr);
    case OLD_DATA_SPACE:
      return old_data_space_->Contains(addr);
    case CODE_SPACE:
      return code_space_->Contains(addr);
    case MAP_SPACE:
      return map_space_->Contains(addr);
    case LO_SPACE:
      return lo_space_->SlowContains(addr);
  }

  // Unknown space enum value.
  return false;
}
2522
2523
2524#ifdef DEBUG
2525void Heap::Verify() {
2526 ASSERT(HasBeenSetup());
2527
2528 VerifyPointersVisitor visitor;
2529 Heap::IterateRoots(&visitor);
2530
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002531 AllSpaces spaces;
2532 while (Space* space = spaces.next()) {
2533 space->Verify();
2534 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002535}
2536#endif // DEBUG
2537
2538
2539Object* Heap::LookupSymbol(Vector<const char> string) {
2540 Object* symbol = NULL;
2541 Object* new_table =
2542 SymbolTable::cast(symbol_table_)->LookupSymbol(string, &symbol);
2543 if (new_table->IsFailure()) return new_table;
2544 symbol_table_ = new_table;
2545 ASSERT(symbol != NULL);
2546 return symbol;
2547}
2548
2549
2550Object* Heap::LookupSymbol(String* string) {
2551 if (string->IsSymbol()) return string;
2552 Object* symbol = NULL;
2553 Object* new_table =
2554 SymbolTable::cast(symbol_table_)->LookupString(string, &symbol);
2555 if (new_table->IsFailure()) return new_table;
2556 symbol_table_ = new_table;
2557 ASSERT(symbol != NULL);
2558 return symbol;
2559}
2560
2561
ager@chromium.org7c537e22008-10-16 08:43:32 +00002562bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
2563 if (string->IsSymbol()) {
2564 *symbol = string;
2565 return true;
2566 }
2567 SymbolTable* table = SymbolTable::cast(symbol_table_);
2568 return table->LookupSymbolIfExists(string, symbol);
2569}
2570
2571
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002572#ifdef DEBUG
2573void Heap::ZapFromSpace() {
2574 ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002575 for (Address a = new_space_.FromSpaceLow();
2576 a < new_space_.FromSpaceHigh();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002577 a += kPointerSize) {
2578 Memory::Address_at(a) = kFromSpaceZapValue;
2579 }
2580}
2581#endif // DEBUG
2582
2583
// Walk the remembered set bits covering [object_start, object_end) and
// invoke copy_object_func on every slot whose bit is set and that holds a
// new-space pointer. Bits whose slots no longer point into new space are
// cleared. rset_start is the first remembered-set word for object_start;
// each 32-bit rset word covers kBitsPerInt consecutive pointer slots.
void Heap::IterateRSetRange(Address object_start,
                            Address object_end,
                            Address rset_start,
                            ObjectSlotCallback copy_object_func) {
  Address object_address = object_start;
  Address rset_address = rset_start;

  // Loop over all the pointers in [object_start, object_end).
  while (object_address < object_end) {
    uint32_t rset_word = Memory::uint32_at(rset_address);
    if (rset_word != 0) {
      // result_rset accumulates the bits that should remain set.
      uint32_t result_rset = rset_word;
      for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
        // Do not dereference pointers at or past object_end.
        if ((rset_word & bitmask) != 0 && object_address < object_end) {
          Object** object_p = reinterpret_cast<Object**>(object_address);
          if (Heap::InNewSpace(*object_p)) {
            copy_object_func(reinterpret_cast<HeapObject**>(object_p));
          }
          // If this pointer does not need to be remembered anymore, clear
          // the remembered set bit. (Re-reads *object_p because the
          // callback may have updated the slot.)
          if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
        }
        // Advance one slot per bit, whether or not the bit was set.
        object_address += kPointerSize;
      }
      // Update the remembered set if it has changed.
      if (result_rset != rset_word) {
        Memory::uint32_at(rset_address) = result_rset;
      }
    } else {
      // No bits in the word were set. This is the common case.
      object_address += kPointerSize * kBitsPerInt;
    }
    rset_address += kIntSize;
  }
}
2620
2621
2622void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
2623 ASSERT(Page::is_rset_in_use());
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002624 ASSERT(space == old_pointer_space_ || space == map_space_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002625
2626 PageIterator it(space, PageIterator::PAGES_IN_USE);
2627 while (it.has_next()) {
2628 Page* page = it.next();
2629 IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
2630 page->RSetStart(), copy_object_func);
2631 }
2632}
2633
2634
2635#ifdef DEBUG
2636#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
2637#else
2638#define SYNCHRONIZE_TAG(tag)
2639#endif
2640
// Visit every root: the strong root set plus the symbol table (which
// IterateStrongRoots does not visit).
void Heap::IterateRoots(ObjectVisitor* v) {
  IterateStrongRoots(v);
  v->VisitPointer(reinterpret_cast<Object**>(&symbol_table_));
  SYNCHRONIZE_TAG("symbol_table");
}
2646
2647
// Visit all strong roots: the root list, struct maps, symbols, and the
// roots held by the various subsystems (bootstrapper, Top, debugger,
// compilation cache, handles, builtins, global handles, threads). The
// SYNCHRONIZE_TAG calls emit checkpoints for the (de)serializer in debug
// builds and compile away otherwise.
void Heap::IterateStrongRoots(ObjectVisitor* v) {
#define ROOT_ITERATE(type, name) \
  v->VisitPointer(bit_cast<Object**, type**>(&name##_));
  STRONG_ROOT_LIST(ROOT_ITERATE);
#undef ROOT_ITERATE
  SYNCHRONIZE_TAG("strong_root_list");

#define STRUCT_MAP_ITERATE(NAME, Name, name) \
  v->VisitPointer(bit_cast<Object**, Map**>(&name##_map_));
  STRUCT_LIST(STRUCT_MAP_ITERATE);
#undef STRUCT_MAP_ITERATE
  SYNCHRONIZE_TAG("struct_map");

#define SYMBOL_ITERATE(name, string) \
  v->VisitPointer(bit_cast<Object**, String**>(&name##_));
  SYMBOL_LIST(SYMBOL_ITERATE)
#undef SYMBOL_ITERATE
  // The hidden symbol is not part of SYMBOL_LIST; visit it explicitly.
  v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
  SYNCHRONIZE_TAG("symbol");

  Bootstrapper::Iterate(v);
  SYNCHRONIZE_TAG("bootstrapper");
  Top::Iterate(v);
  SYNCHRONIZE_TAG("top");
  Debug::Iterate(v);
  SYNCHRONIZE_TAG("debug");
  CompilationCache::Iterate(v);
  SYNCHRONIZE_TAG("compilationcache");

  // Iterate over local handles in handle scopes.
  HandleScopeImplementer::Iterate(v);
  SYNCHRONIZE_TAG("handlescope");

  // Iterate over the builtin code objects and code stubs in the heap. Note
  // that it is not strictly necessary to iterate over code objects on
  // scavenge collections. We still do it here because this same function
  // is used by the mark-sweep collector and the deserializer.
  Builtins::IterateBuiltins(v);
  SYNCHRONIZE_TAG("builtins");

  // Iterate over global handles.
  GlobalHandles::IterateRoots(v);
  SYNCHRONIZE_TAG("globalhandles");

  // Iterate over pointers being held by inactive threads.
  ThreadManager::Iterate(v);
  SYNCHRONIZE_TAG("threadmanager");
}
2696#undef SYNCHRONIZE_TAG
2697
2698
// Flag is set when the heap has been configured (via ConfigureHeap). The
// heap can be repeatedly configured through the API until it is setup.
static bool heap_configured = false;
2702
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
// Configure the semispace and old generation sizes. Arguments that are not
// positive leave the corresponding size unchanged. Returns false if the
// heap has already been set up (too late to reconfigure).
bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
  if (HasBeenSetup()) return false;

  if (semispace_size > 0) semispace_size_ = semispace_size;
  if (old_gen_size > 0) old_generation_size_ = old_gen_size;

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  semispace_size_ = RoundUpToPowerOf2(semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
  young_generation_size_ = 2 * semispace_size_;

  // The old generation is paged.
  old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);

  heap_configured = true;
  return true;
}
2724
2725
kasper.lund7276f142008-07-30 08:49:36 +00002726bool Heap::ConfigureHeapDefault() {
2727 return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
2728}
2729
2730
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002731int Heap::PromotedSpaceSize() {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002732 return old_pointer_space_->Size()
2733 + old_data_space_->Size()
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002734 + code_space_->Size()
2735 + map_space_->Size()
2736 + lo_space_->Size();
2737}
2738
2739
kasper.lund7276f142008-07-30 08:49:36 +00002740int Heap::PromotedExternalMemorySize() {
2741 if (amount_of_external_allocated_memory_
2742 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
2743 return amount_of_external_allocated_memory_
2744 - amount_of_external_allocated_memory_at_last_global_gc_;
2745}
2746
2747
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002748bool Heap::Setup(bool create_heap_objects) {
2749 // Initialize heap spaces and initial maps and objects. Whenever something
2750 // goes wrong, just return false. The caller should check the results and
2751 // call Heap::TearDown() to release allocated memory.
2752 //
2753 // If the heap is not yet configured (eg, through the API), configure it.
2754 // Configuration is based on the flags new-space-size (really the semispace
2755 // size) and old-space-size if set or the initial values of semispace_size_
2756 // and old_generation_size_ otherwise.
2757 if (!heap_configured) {
kasper.lund7276f142008-07-30 08:49:36 +00002758 if (!ConfigureHeapDefault()) return false;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002759 }
2760
2761 // Setup memory allocator and allocate an initial chunk of memory. The
2762 // initial chunk is double the size of the new space to ensure that we can
2763 // find a pair of semispaces that are contiguous and aligned to their size.
2764 if (!MemoryAllocator::Setup(MaxCapacity())) return false;
2765 void* chunk
2766 = MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
2767 if (chunk == NULL) return false;
2768
2769 // Put the initial chunk of the old space at the start of the initial
2770 // chunk, then the two new space semispaces, then the initial chunk of
2771 // code space. Align the pair of semispaces to their size, which must be
2772 // a power of 2.
2773 ASSERT(IsPowerOf2(young_generation_size_));
kasperl@chromium.orgb9123622008-09-17 14:05:56 +00002774 Address code_space_start = reinterpret_cast<Address>(chunk);
2775 Address new_space_start = RoundUp(code_space_start, young_generation_size_);
2776 Address old_space_start = new_space_start + young_generation_size_;
2777 int code_space_size = new_space_start - code_space_start;
2778 int old_space_size = young_generation_size_ - code_space_size;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002779
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002780 // Initialize new space.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002781 if (!new_space_.Setup(new_space_start, young_generation_size_)) return false;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002782
2783 // Initialize old space, set the maximum capacity to the old generation
kasper.lund7276f142008-07-30 08:49:36 +00002784 // size. It will not contain code.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002785 old_pointer_space_ =
2786 new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
2787 if (old_pointer_space_ == NULL) return false;
2788 if (!old_pointer_space_->Setup(old_space_start, old_space_size >> 1)) {
2789 return false;
2790 }
2791 old_data_space_ =
2792 new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
2793 if (old_data_space_ == NULL) return false;
2794 if (!old_data_space_->Setup(old_space_start + (old_space_size >> 1),
2795 old_space_size >> 1)) {
2796 return false;
2797 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002798
2799 // Initialize the code space, set its maximum capacity to the old
kasper.lund7276f142008-07-30 08:49:36 +00002800 // generation size. It needs executable memory.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002801 code_space_ =
2802 new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002803 if (code_space_ == NULL) return false;
2804 if (!code_space_->Setup(code_space_start, code_space_size)) return false;
2805
2806 // Initialize map space.
kasper.lund7276f142008-07-30 08:49:36 +00002807 map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002808 if (map_space_ == NULL) return false;
2809 // Setting up a paged space without giving it a virtual memory range big
2810 // enough to hold at least a page will cause it to allocate.
2811 if (!map_space_->Setup(NULL, 0)) return false;
2812
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002813 // The large object code space may contain code or data. We set the memory
2814 // to be non-executable here for safety, but this means we need to enable it
2815 // explicitly when allocating large code objects.
2816 lo_space_ = new LargeObjectSpace(LO_SPACE);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002817 if (lo_space_ == NULL) return false;
2818 if (!lo_space_->Setup()) return false;
2819
2820 if (create_heap_objects) {
2821 // Create initial maps.
2822 if (!CreateInitialMaps()) return false;
2823 if (!CreateApiObjects()) return false;
2824
2825 // Create initial objects
2826 if (!CreateInitialObjects()) return false;
2827 }
2828
2829 LOG(IntEvent("heap-capacity", Capacity()));
2830 LOG(IntEvent("heap-available", Available()));
2831
2832 return true;
2833}
2834
2835
// Release all heap spaces and the underlying memory allocator. Safe to call
// after a partially failed Setup: each paged space is torn down only if it
// was created.
void Heap::TearDown() {
  GlobalHandles::TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  // The allocator goes last, after every space has released its pages.
  MemoryAllocator::TearDown();
}
2873
2874
2875void Heap::Shrink() {
2876 // Try to shrink map, old, and code spaces.
2877 map_space_->Shrink();
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002878 old_pointer_space_->Shrink();
2879 old_data_space_->Shrink();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002880 code_space_->Shrink();
2881}
2882
2883
2884#ifdef DEBUG
2885
2886class PrintHandleVisitor: public ObjectVisitor {
2887 public:
2888 void VisitPointers(Object** start, Object** end) {
2889 for (Object** p = start; p < end; p++)
2890 PrintF(" handle %p to %p\n", p, *p);
2891 }
2892};
2893
// Debugging aid: prints every handle registered with the handle scope
// implementer, one line per handle.
void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  HandleScopeImplementer::Iterate(&v);
}
2899
2900#endif
2901
2902
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002903Space* AllSpaces::next() {
2904 switch (counter_++) {
2905 case NEW_SPACE:
2906 return Heap::new_space();
2907 case OLD_POINTER_SPACE:
2908 return Heap::old_pointer_space();
2909 case OLD_DATA_SPACE:
2910 return Heap::old_data_space();
2911 case CODE_SPACE:
2912 return Heap::code_space();
2913 case MAP_SPACE:
2914 return Heap::map_space();
2915 case LO_SPACE:
2916 return Heap::lo_space();
2917 default:
2918 return NULL;
2919 }
2920}
2921
2922
2923PagedSpace* PagedSpaces::next() {
2924 switch (counter_++) {
2925 case OLD_POINTER_SPACE:
2926 return Heap::old_pointer_space();
2927 case OLD_DATA_SPACE:
2928 return Heap::old_data_space();
2929 case CODE_SPACE:
2930 return Heap::code_space();
2931 case MAP_SPACE:
2932 return Heap::map_space();
2933 default:
2934 return NULL;
2935 }
2936}
2937
2938
2939
2940OldSpace* OldSpaces::next() {
2941 switch (counter_++) {
2942 case OLD_POINTER_SPACE:
2943 return Heap::old_pointer_space();
2944 case OLD_DATA_SPACE:
2945 return Heap::old_data_space();
2946 case CODE_SPACE:
2947 return Heap::code_space();
2948 default:
2949 return NULL;
2950 }
2951}
2952
2953
// Starts at the first space; the per-space object iterator is created
// lazily by next().
SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
}
2956
2957
SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.  The iterator handed out by next() is
  // owned by this SpaceIterator, not by the caller.
  delete iterator_;
}
2962
2963
// True while there are spaces beyond the current one still to visit.
bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}
2968
2969
// Returns an object iterator for the current space, advancing to the
// next space on every call after the first.  The returned iterator is
// owned by this SpaceIterator and is deleted on the following call.
// Returns NULL once all spaces have been handed out.
ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}
2984
2985
// Create an iterator for the space to iterate.  New space is a semispace
// and the large object space is a chunk list, so each gets its own
// iterator type; the paged spaces all share HeapObjectIterator.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(Heap::new_space());
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_data_space());
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(Heap::code_space());
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(Heap::map_space());
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(Heap::lo_space());
      break;
  }

  // Return the newly allocated iterator; the ASSERT catches an
  // unhandled space kind (no default case above).
  ASSERT(iterator_ != NULL);
  return iterator_;
}
3015
3016
// Positions the iterator at the first object of the first space.
HeapIterator::HeapIterator() {
  Init();
}
3020
3021
HeapIterator::~HeapIterator() {
  Shutdown();
}
3025
3026
void HeapIterator::Init() {
  // Start the iteration: the space iterator owns the object iterators it
  // hands out; object_iterator_ is a borrowed pointer.
  space_iterator_ = new SpaceIterator();
  object_iterator_ = space_iterator_->next();
}
3032
3033
void HeapIterator::Shutdown() {
  // Make sure the last iterator is deallocated.  Deleting the space
  // iterator also deletes the object iterator it currently owns, so
  // object_iterator_ is merely cleared, never deleted here.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
}
3040
3041
3042bool HeapIterator::has_next() {
3043 // No iterator means we are done.
3044 if (object_iterator_ == NULL) return false;
3045
3046 if (object_iterator_->has_next_object()) {
3047 // If the current iterator has more objects we are fine.
3048 return true;
3049 } else {
3050 // Go though the spaces looking for one that has objects.
3051 while (space_iterator_->has_next()) {
3052 object_iterator_ = space_iterator_->next();
3053 if (object_iterator_->has_next_object()) {
3054 return true;
3055 }
3056 }
3057 }
3058 // Done with the last space.
3059 object_iterator_ = NULL;
3060 return false;
3061}
3062
3063
3064HeapObject* HeapIterator::next() {
3065 if (has_next()) {
3066 return object_iterator_->next_object();
3067 } else {
3068 return NULL;
3069 }
3070}
3071
3072
void HeapIterator::reset() {
  // Restart the iterator from the first object of the first space.
  Shutdown();
  Init();
}
3078
3079
3080//
3081// HeapProfiler class implementation.
3082//
3083#ifdef ENABLE_LOGGING_AND_PROFILING
3084void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
3085 InstanceType type = obj->map()->instance_type();
3086 ASSERT(0 <= type && type <= LAST_TYPE);
3087 info[type].increment_number(1);
3088 info[type].increment_bytes(obj->Size());
3089}
3090#endif
3091
3092
3093#ifdef ENABLE_LOGGING_AND_PROFILING
// Logs one heap sample: a begin event, one item event per instance type
// with live bytes (all string types lumped into "STRING_TYPE"), and an
// end event.  Walks the entire heap, so this is expensive.
void HeapProfiler::WriteSample() {
  LOG(HeapSampleBeginEvent("Heap", "allocated"));

  // One histogram bucket per instance type, named after the enum value.
  HistogramInfo info[LAST_TYPE+1];
#define DEF_TYPE_NAME(name) info[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

  HeapIterator iterator;
  while (iterator.has_next()) {
    CollectStats(iterator.next(), info);
  }

  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT_SIZE(type, size, name) \
  string_number += info[type].number(); \
  string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT_SIZE)
#undef INCREMENT_SIZE
  if (string_bytes > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Report every non-string type that has at least one live byte.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].bytes() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }

  LOG(HeapSampleEndEvent("Heap", "allocated"));
}
3128
3129
3130#endif
3131
3132
3133
3134#ifdef DEBUG
3135
// State shared by the debug-only object-graph search below
// (TracePathToObject / TracePathToGlobal).
static bool search_for_any_global;  // true: stop at any JS global object
static Object* search_target;       // specific object to find (NULL in any-global mode)
static bool found_target;           // set once the target has been reached
static List<Object*> object_stack(20);  // current DFS path from a root


// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;
3144
static void MarkObjectRecursively(Object** p);
// Body visitor for MarkObjectRecursively: recurses into every heap
// object referenced from the visited slot range.
class MarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Mark all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkObjectRecursively(p);
    }
  }
};

static MarkObjectVisitor mark_visitor;
3158
// Marks *p and everything reachable from it by temporarily overwriting
// each object's map word with (map address + kMarkTag) — a non-heap-
// object map word means "visited".  Records the current DFS path in
// object_stack and sets found_target when the search target (or, in
// any-global mode, any JS global object) is reached.
static void MarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target) return;  // stop if target found
  object_stack.Add(obj);
  if ((search_for_any_global && obj->IsJSGlobalObject()) ||
      (!search_for_any_global && (obj == search_target))) {
    found_target = true;
    return;
  }

  // Code objects store IC targets as raw addresses; convert them to
  // object pointers so the body iteration below can follow them.
  if (obj->IsCode()) {
    Code::cast(obj)->ConvertICTargetsFromAddressToObject();
  }

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  // Tag the map word before recursing to cut cycles.
  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  MarkObjectRecursively(&map);

  obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                   &mark_visitor);

  if (!found_target)  // don't pop if found the target
    object_stack.RemoveLast();
}
3195
3196
static void UnmarkObjectRecursively(Object** p);
// Body visitor for UnmarkObjectRecursively: recurses into every heap
// object referenced from the visited slot range.
class UnmarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Unmark all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        UnmarkObjectRecursively(p);
    }
  }
};

static UnmarkObjectVisitor unmark_visitor;
3210
// Reverses MarkObjectRecursively: restores the original map pointer by
// subtracting kMarkTag from the tagged map word, then recurses into the
// map and the object's body.  IC targets are converted back to raw
// addresses last, mirroring the conversion done during marking.
static void UnmarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   &unmark_visitor);

  if (obj->IsCode()) {
    Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
  }
}
3240
3241
// Runs a full mark pass from a single root, immediately unmarks again
// (leaving the heap unchanged), and prints the recorded path if the
// target was found.
static void MarkRootObjectRecursively(Object** root) {
  if (search_for_any_global) {
    ASSERT(search_target == NULL);
  } else {
    ASSERT(search_target->IsHeapObject());
  }
  found_target = false;
  object_stack.Clear();

  MarkObjectRecursively(root);
  UnmarkObjectRecursively(root);

  if (found_target) {
    PrintF("=====================================\n");
    PrintF("==== Path to object ====\n");
    PrintF("=====================================\n\n");

    // object_stack holds the chain root -> ... -> target; print each
    // link separated by an arrow.
    ASSERT(!object_stack.is_empty());
    for (int i = 0; i < object_stack.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}
3268
3269
3270// Helper class for visiting HeapObjects recursively.
3271class MarkRootVisitor: public ObjectVisitor {
3272 public:
3273 void VisitPointers(Object** start, Object** end) {
3274 // Visit all HeapObject pointers in [start, end)
3275 for (Object** p = start; p < end; p++) {
3276 if ((*p)->IsHeapObject())
3277 MarkRootObjectRecursively(p);
3278 }
3279 }
3280};
3281
3282
3283// Triggers a depth-first traversal of reachable objects from roots
3284// and finds a path to a specific heap object and prints it.
3285void Heap::TracePathToObject() {
3286 search_target = NULL;
3287 search_for_any_global = false;
3288
3289 MarkRootVisitor root_visitor;
3290 IterateRoots(&root_visitor);
3291}
3292
3293
3294// Triggers a depth-first traversal of reachable objects from roots
3295// and finds a path to any global object and prints it. Useful for
3296// determining the source for leaks of global objects.
3297void Heap::TracePathToGlobal() {
3298 search_target = NULL;
3299 search_for_any_global = true;
3300
3301 MarkRootVisitor root_visitor;
3302 IterateRoots(&root_visitor);
3303}
3304#endif
3305
3306
// Captures the state needed for the one-line trace printed by the
// destructor.  Time and size are only sampled when --trace-gc is on;
// the previous full collection's stats are sampled unconditionally,
// before the collector overwrites them.
GCTracer::GCTracer()
    : start_time_(0.0),
      start_size_(0.0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
  if (!FLAG_trace_gc) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = SizeOfHeapObjects();
}
3322
3323
// Emits the trace line: collector name, heap size before -> after in MB,
// and elapsed wall-clock time in ms.
GCTracer::~GCTracer() {
  if (!FLAG_trace_gc) return;
  // Printf ONE line iff flag is set.
  PrintF("%s %.1f -> %.1f MB, %d ms.\n",
         CollectorString(),
         start_size_, SizeOfHeapObjects(),
         static_cast<int>(OS::TimeCurrentMillis() - start_time_));
}
3332
3333
3334const char* GCTracer::CollectorString() {
3335 switch (collector_) {
3336 case SCAVENGER:
3337 return "Scavenge";
3338 case MARK_COMPACTOR:
3339 return MarkCompactCollector::HasCompacted() ? "Mark-compact"
3340 : "Mark-sweep";
3341 }
3342 return "Unknown GC";
3343}
3344
3345
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00003346#ifdef DEBUG
// With --gc-greedy, a new-space GC is forced eagerly.  Skipped (treated
// as success) while bootstrapping or while allocation failure is
// disallowed, since a collection cannot be tolerated there.
bool Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (Bootstrapper::IsActive()) return true;
  if (disallow_allocation_failure()) return true;
  return CollectGarbage(0, NEW_SPACE);
}
3353#endif
3354
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003355} } // namespace v8::internal