1// Copyright 2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "accessors.h"
31#include "api.h"
32#include "bootstrapper.h"
33#include "codegen-inl.h"
34#include "compilation-cache.h"
35#include "debug.h"
36#include "heap-profiler.h"
37#include "global-handles.h"
38#include "mark-compact.h"
39#include "natives.h"
40#include "scanner.h"
41#include "scopeinfo.h"
42#include "snapshot.h"
43#include "v8threads.h"
44#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
45#include "regexp-macro-assembler.h"
46#include "arm/regexp-macro-assembler-arm.h"
47#endif
48
49namespace v8 {
50namespace internal {
51
52
53String* Heap::hidden_symbol_;
54Object* Heap::roots_[Heap::kRootListLength];
55
56
57NewSpace Heap::new_space_;
58OldSpace* Heap::old_pointer_space_ = NULL;
59OldSpace* Heap::old_data_space_ = NULL;
60OldSpace* Heap::code_space_ = NULL;
61MapSpace* Heap::map_space_ = NULL;
62CellSpace* Heap::cell_space_ = NULL;
63LargeObjectSpace* Heap::lo_space_ = NULL;
64
65static const int kMinimumPromotionLimit = 2*MB;
66static const int kMinimumAllocationLimit = 8*MB;
67
68int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
69int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
70
71int Heap::old_gen_exhausted_ = false;
72
73int Heap::amount_of_external_allocated_memory_ = 0;
74int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
75
76// semispace_size_ should be a power of 2 and old_generation_size_ should be
77// a multiple of Page::kPageSize.
78#if defined(ANDROID)
79int Heap::max_semispace_size_ = 2*MB;
80int Heap::max_old_generation_size_ = 192*MB;
81int Heap::initial_semispace_size_ = 128*KB;
82size_t Heap::code_range_size_ = 0;
83#elif defined(V8_TARGET_ARCH_X64)
84int Heap::max_semispace_size_ = 16*MB;
85int Heap::max_old_generation_size_ = 1*GB;
86int Heap::initial_semispace_size_ = 1*MB;
87size_t Heap::code_range_size_ = 512*MB;
88#else
89int Heap::max_semispace_size_ = 8*MB;
90int Heap::max_old_generation_size_ = 512*MB;
91int Heap::initial_semispace_size_ = 512*KB;
92size_t Heap::code_range_size_ = 0;
93#endif
94
95// The snapshot semispace size will be the default semispace size if
96// snapshotting is used and will be the requested semispace size as
97// set up by ConfigureHeap otherwise.
98int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
99
100GCCallback Heap::global_gc_prologue_callback_ = NULL;
101GCCallback Heap::global_gc_epilogue_callback_ = NULL;
102
103// Variables set based on semispace_size_ and old_generation_size_ in
104// ConfigureHeap.
105
106// Will be 4 * reserved_semispace_size_ to ensure that young
107// generation can be aligned to its size.
108int Heap::survived_since_last_expansion_ = 0;
109int Heap::external_allocation_limit_ = 0;
110
111Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
112
113int Heap::mc_count_ = 0;
114int Heap::gc_count_ = 0;
115
116int Heap::always_allocate_scope_depth_ = 0;
117int Heap::linear_allocation_scope_depth_ = 0;
118bool Heap::context_disposed_pending_ = false;
119
120#ifdef DEBUG
121bool Heap::allocation_allowed_ = true;
122
123int Heap::allocation_timeout_ = 0;
124bool Heap::disallow_allocation_failure_ = false;
125#endif // DEBUG
126
127
128int Heap::Capacity() {
129 if (!HasBeenSetup()) return 0;
130
131 return new_space_.Capacity() +
132 old_pointer_space_->Capacity() +
133 old_data_space_->Capacity() +
134 code_space_->Capacity() +
135 map_space_->Capacity() +
136 cell_space_->Capacity();
137}
138
139
140int Heap::CommittedMemory() {
141 if (!HasBeenSetup()) return 0;
142
143 return new_space_.CommittedMemory() +
144 old_pointer_space_->CommittedMemory() +
145 old_data_space_->CommittedMemory() +
146 code_space_->CommittedMemory() +
147 map_space_->CommittedMemory() +
148 cell_space_->CommittedMemory() +
149 lo_space_->Size();
150}
151
152
153int Heap::Available() {
154 if (!HasBeenSetup()) return 0;
155
156 return new_space_.Available() +
157 old_pointer_space_->Available() +
158 old_data_space_->Available() +
159 code_space_->Available() +
160 map_space_->Available() +
161 cell_space_->Available();
162}
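// A note on the three metrics above: Capacity() and Available() cover only
// the new space, the paged spaces and the cell space, while CommittedMemory()
// additionally folds in lo_space_->Size(). Illustrative numbers only: with
// 8 MB committed across the paged spaces, 2 MB in new space and 3 MB of
// large objects, CommittedMemory() reports 13 MB, while Capacity() and
// Available() ignore the 3 MB of large objects entirely.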
163
164
165bool Heap::HasBeenSetup() {
166 return old_pointer_space_ != NULL &&
167 old_data_space_ != NULL &&
168 code_space_ != NULL &&
169 map_space_ != NULL &&
170 cell_space_ != NULL &&
171 lo_space_ != NULL;
172}
173
174
175GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
176 // Is global GC requested?
177 if (space != NEW_SPACE || FLAG_gc_global) {
178 Counters::gc_compactor_caused_by_request.Increment();
179 return MARK_COMPACTOR;
180 }
181
182 // Is enough data promoted to justify a global GC?
183 if (OldGenerationPromotionLimitReached()) {
184 Counters::gc_compactor_caused_by_promoted_data.Increment();
185 return MARK_COMPACTOR;
186 }
187
188 // Have allocation in OLD and LO failed?
189 if (old_gen_exhausted_) {
190 Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
191 return MARK_COMPACTOR;
192 }
193
194 // Is there enough space left in OLD to guarantee that a scavenge can
195 // succeed?
196 //
197 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
198 // for object promotion. It counts only the bytes that the memory
199 // allocator has not yet allocated from the OS and assigned to any space,
200 // and does not count available bytes already in the old space or code
201 // space. Undercounting is safe---we may get an unrequested full GC when
202 // a scavenge would have succeeded.
203 if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
204 Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
205 return MARK_COMPACTOR;
206 }
207
208 // Default
209 return SCAVENGER;
210}
211
212
213// TODO(1238405): Combine the infrastructure for --heap-stats and
214// --log-gc to avoid the complicated preprocessor and flag testing.
215#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
216void Heap::ReportStatisticsBeforeGC() {
217 // Heap::ReportHeapStatistics will also log NewSpace statistics when
218 // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
219 // following logic is used to avoid double logging.
220#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
221 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
222 if (FLAG_heap_stats) {
223 ReportHeapStatistics("Before GC");
224 } else if (FLAG_log_gc) {
225 new_space_.ReportStatistics();
226 }
227 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
228#elif defined(DEBUG)
229 if (FLAG_heap_stats) {
230 new_space_.CollectStatistics();
231 ReportHeapStatistics("Before GC");
232 new_space_.ClearHistograms();
233 }
234#elif defined(ENABLE_LOGGING_AND_PROFILING)
235 if (FLAG_log_gc) {
236 new_space_.CollectStatistics();
237 new_space_.ReportStatistics();
238 new_space_.ClearHistograms();
239 }
240#endif
241}
242
243
244#if defined(ENABLE_LOGGING_AND_PROFILING)
245void Heap::PrintShortHeapStatistics() {
246 if (!FLAG_trace_gc_verbose) return;
247 PrintF("Memory allocator, used: %8d, available: %8d\n",
248 MemoryAllocator::Size(),
249 MemoryAllocator::Available());
250 PrintF("New space, used: %8d, available: %8d\n",
251 Heap::new_space_.Size(),
252 new_space_.Available());
253 PrintF("Old pointers, used: %8d, available: %8d, waste: %8d\n",
254 old_pointer_space_->Size(),
255 old_pointer_space_->Available(),
256 old_pointer_space_->Waste());
257 PrintF("Old data space, used: %8d, available: %8d, waste: %8d\n",
258 old_data_space_->Size(),
259 old_data_space_->Available(),
260 old_data_space_->Waste());
261 PrintF("Code space, used: %8d, available: %8d, waste: %8d\n",
262 code_space_->Size(),
263 code_space_->Available(),
264 code_space_->Waste());
265 PrintF("Map space, used: %8d, available: %8d, waste: %8d\n",
266 map_space_->Size(),
267 map_space_->Available(),
268 map_space_->Waste());
269 PrintF("Cell space, used: %8d, available: %8d, waste: %8d\n",
270 cell_space_->Size(),
271 cell_space_->Available(),
272 cell_space_->Waste());
273 PrintF("Large object space, used: %8d, available: %8d\n",
274 lo_space_->Size(),
275 lo_space_->Available());
276}
277#endif
278
279
280// TODO(1238405): Combine the infrastructure for --heap-stats and
281// --log-gc to avoid the complicated preprocessor and flag testing.
282void Heap::ReportStatisticsAfterGC() {
283 // As before the GC, we use some complicated logic to ensure that
284 // NewSpace statistics are logged exactly once when --log-gc is turned on.
285#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
286 if (FLAG_heap_stats) {
287 new_space_.CollectStatistics();
288 ReportHeapStatistics("After GC");
289 } else if (FLAG_log_gc) {
290 new_space_.ReportStatistics();
291 }
292#elif defined(DEBUG)
293 if (FLAG_heap_stats) ReportHeapStatistics("After GC");
294#elif defined(ENABLE_LOGGING_AND_PROFILING)
295 if (FLAG_log_gc) new_space_.ReportStatistics();
296#endif
297}
298#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
299
300
301void Heap::GarbageCollectionPrologue() {
302 TranscendentalCache::Clear();
303 gc_count_++;
304#ifdef DEBUG
305 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
306 allow_allocation(false);
307
308 if (FLAG_verify_heap) {
309 Verify();
310 }
311
312 if (FLAG_gc_verbose) Print();
313
314 if (FLAG_print_rset) {
315 // Not all spaces have remembered set bits that we care about.
316 old_pointer_space_->PrintRSet();
317 map_space_->PrintRSet();
318 lo_space_->PrintRSet();
319 }
320#endif
321
322#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
323 ReportStatisticsBeforeGC();
324#endif
325}
326
327int Heap::SizeOfObjects() {
328 int total = 0;
329 AllSpaces spaces;
330 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
331 total += space->Size();
332 }
333 return total;
334}
335
336void Heap::GarbageCollectionEpilogue() {
337#ifdef DEBUG
338 allow_allocation(true);
339 ZapFromSpace();
340
341 if (FLAG_verify_heap) {
342 Verify();
343 }
344
345 if (FLAG_print_global_handles) GlobalHandles::Print();
346 if (FLAG_print_handles) PrintHandles();
347 if (FLAG_gc_verbose) Print();
348 if (FLAG_code_stats) ReportCodeStatistics("After GC");
349#endif
350
351 Counters::alive_after_last_gc.Set(SizeOfObjects());
352
353 Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
354 Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
355#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
356 ReportStatisticsAfterGC();
357#endif
358#ifdef ENABLE_DEBUGGER_SUPPORT
359 Debug::AfterGarbageCollection();
360#endif
361}
362
363
364void Heap::CollectAllGarbage(bool force_compaction) {
365 // Since we are ignoring the return value, the exact choice of space does
366 // not matter, so long as we do not specify NEW_SPACE, which would not
367 // cause a full GC.
368 MarkCompactCollector::SetForceCompaction(force_compaction);
369 CollectGarbage(0, OLD_POINTER_SPACE);
370 MarkCompactCollector::SetForceCompaction(false);
371}
372
373
374void Heap::CollectAllGarbageIfContextDisposed() {
375 // If the garbage collector interface is exposed through the global
376 // gc() function, we avoid being clever about forcing GCs when
377 // contexts are disposed and leave it to the embedder to make
378 // informed decisions about when to force a collection.
379 if (!FLAG_expose_gc && context_disposed_pending_) {
380 HistogramTimerScope scope(&Counters::gc_context);
381 CollectAllGarbage(false);
382 }
383 context_disposed_pending_ = false;
384}
385
386
387void Heap::NotifyContextDisposed() {
388 context_disposed_pending_ = true;
389}
390
391
392bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
393 // The VM is in the GC state until exiting this function.
394 VMState state(GC);
395
396#ifdef DEBUG
397 // Reset the allocation timeout to the GC interval, but make sure to
398 // allow at least a few allocations after a collection. The reason
399 // for this is that we have a lot of allocation sequences and we
400 // assume that a garbage collection will allow the subsequent
401 // allocation attempts to go through.
402 allocation_timeout_ = Max(6, FLAG_gc_interval);
403#endif
404
405 { GCTracer tracer;
406 GarbageCollectionPrologue();
407 // The GC count was incremented in the prologue. Tell the tracer about
408 // it.
409 tracer.set_gc_count(gc_count_);
410
411 GarbageCollector collector = SelectGarbageCollector(space);
412 // Tell the tracer which collector we've selected.
413 tracer.set_collector(collector);
414
415 HistogramTimer* rate = (collector == SCAVENGER)
416 ? &Counters::gc_scavenger
417 : &Counters::gc_compactor;
418 rate->Start();
419 PerformGarbageCollection(space, collector, &tracer);
420 rate->Stop();
421
422 GarbageCollectionEpilogue();
423 }
424
425
426#ifdef ENABLE_LOGGING_AND_PROFILING
427 if (FLAG_log_gc) HeapProfiler::WriteSample();
428#endif
429
430 switch (space) {
431 case NEW_SPACE:
432 return new_space_.Available() >= requested_size;
433 case OLD_POINTER_SPACE:
434 return old_pointer_space_->Available() >= requested_size;
435 case OLD_DATA_SPACE:
436 return old_data_space_->Available() >= requested_size;
437 case CODE_SPACE:
438 return code_space_->Available() >= requested_size;
439 case MAP_SPACE:
440 return map_space_->Available() >= requested_size;
441 case CELL_SPACE:
442 return cell_space_->Available() >= requested_size;
443 case LO_SPACE:
444 return lo_space_->Available() >= requested_size;
445 }
446 return false;
447}
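// The boolean returned above only says whether the requested space now has
// at least requested_size bytes available; callers that simply want a full
// collection (e.g. CollectAllGarbage) ignore it. A sketch of the retry
// pattern this enables (not the exact logic used by the allocation path, and
// the local names are hypothetical):
//
//   Object* result = old_pointer_space_->AllocateRaw(size);
//   if (result->IsFailure() && CollectGarbage(size, OLD_POINTER_SPACE)) {
//     result = old_pointer_space_->AllocateRaw(size);  // Retry once.
//   }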
448
449
450void Heap::PerformScavenge() {
451 GCTracer tracer;
452 PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
453}
454
455
456#ifdef DEBUG
457// Helper class for verifying the symbol table.
458class SymbolTableVerifier : public ObjectVisitor {
459 public:
460 SymbolTableVerifier() { }
461 void VisitPointers(Object** start, Object** end) {
462 // Visit all HeapObject pointers in [start, end).
463 for (Object** p = start; p < end; p++) {
464 if ((*p)->IsHeapObject()) {
465 // Check that the symbol is actually a symbol.
466 ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
467 }
468 }
469 }
470};
471#endif // DEBUG
472
473
474static void VerifySymbolTable() {
475#ifdef DEBUG
476 SymbolTableVerifier verifier;
477 Heap::symbol_table()->IterateElements(&verifier);
478#endif // DEBUG
479}
480
481
482void Heap::ReserveSpace(
483 int new_space_size,
484 int pointer_space_size,
485 int data_space_size,
486 int code_space_size,
487 int map_space_size,
488 int cell_space_size,
489 int large_object_size) {
490 NewSpace* new_space = Heap::new_space();
491 PagedSpace* old_pointer_space = Heap::old_pointer_space();
492 PagedSpace* old_data_space = Heap::old_data_space();
493 PagedSpace* code_space = Heap::code_space();
494 PagedSpace* map_space = Heap::map_space();
495 PagedSpace* cell_space = Heap::cell_space();
496 LargeObjectSpace* lo_space = Heap::lo_space();
497 bool gc_performed = true;
498 while (gc_performed) {
499 gc_performed = false;
500 if (!new_space->ReserveSpace(new_space_size)) {
501 Heap::CollectGarbage(new_space_size, NEW_SPACE);
502 gc_performed = true;
503 }
504 if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
505 Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
506 gc_performed = true;
507 }
508 if (!(old_data_space->ReserveSpace(data_space_size))) {
509 Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
510 gc_performed = true;
511 }
512 if (!(code_space->ReserveSpace(code_space_size))) {
513 Heap::CollectGarbage(code_space_size, CODE_SPACE);
514 gc_performed = true;
515 }
516 if (!(map_space->ReserveSpace(map_space_size))) {
517 Heap::CollectGarbage(map_space_size, MAP_SPACE);
518 gc_performed = true;
519 }
520 if (!(cell_space->ReserveSpace(cell_space_size))) {
521 Heap::CollectGarbage(cell_space_size, CELL_SPACE);
522 gc_performed = true;
523 }
524 // We add a slack-factor of 2 in order to have space for the remembered
525 // set and a series of large-object allocations that are only just larger
526 // than the page size.
527 large_object_size *= 2;
528 // The ReserveSpace method on the large object space checks how much
529 // we can expand the old generation. This includes expansion caused by
530 // allocation in the other spaces.
531 large_object_size += cell_space_size + map_space_size + code_space_size +
532 data_space_size + pointer_space_size;
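// Illustrative numbers only: asking for a 1 MB large-object budget and
// 512 KB in each of the five other old-generation spaces turns the check
// below into lo_space->ReserveSpace(2 * 1 MB + 5 * 512 KB), i.e. 4.5 MB of
// old-generation headroom.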
533 if (!(lo_space->ReserveSpace(large_object_size))) {
534 Heap::CollectGarbage(large_object_size, LO_SPACE);
535 gc_performed = true;
536 }
537 }
538}
539
540
541void Heap::EnsureFromSpaceIsCommitted() {
542 if (new_space_.CommitFromSpaceIfNeeded()) return;
543
544 // Committing memory to from space failed.
545 // Try shrinking and try again.
546 Shrink();
547 if (new_space_.CommitFromSpaceIfNeeded()) return;
548
549 // Committing memory to from space failed again.
550 // Memory is exhausted and we will die.
551 V8::FatalProcessOutOfMemory("Committing semi space failed.");
552}
553
554
555void Heap::PerformGarbageCollection(AllocationSpace space,
556 GarbageCollector collector,
557 GCTracer* tracer) {
558 VerifySymbolTable();
559 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
560 ASSERT(!allocation_allowed_);
561 global_gc_prologue_callback_();
562 }
563 EnsureFromSpaceIsCommitted();
564 if (collector == MARK_COMPACTOR) {
565 MarkCompact(tracer);
566
567 int old_gen_size = PromotedSpaceSize();
568 old_gen_promotion_limit_ =
569 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
570 old_gen_allocation_limit_ =
571 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
572 old_gen_exhausted_ = false;
573 }
574 Scavenge();
575
576 Counters::objs_since_last_young.Set(0);
577
578 if (collector == MARK_COMPACTOR) {
579 DisableAssertNoAllocation allow_allocation;
580 GlobalHandles::PostGarbageCollectionProcessing();
581 }
582
583 // Update relocatables.
584 Relocatable::PostGarbageCollectionProcessing();
585
586 if (collector == MARK_COMPACTOR) {
587 // Register the amount of external allocated memory.
588 amount_of_external_allocated_memory_at_last_global_gc_ =
589 amount_of_external_allocated_memory_;
590 }
591
592 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
593 ASSERT(!allocation_allowed_);
594 global_gc_epilogue_callback_();
595 }
596 VerifySymbolTable();
597}
598
599
600void Heap::MarkCompact(GCTracer* tracer) {
601 gc_state_ = MARK_COMPACT;
602 mc_count_++;
603 tracer->set_full_gc_count(mc_count_);
604 LOG(ResourceEvent("markcompact", "begin"));
605
606 MarkCompactCollector::Prepare(tracer);
607
608 bool is_compacting = MarkCompactCollector::IsCompacting();
609
610 MarkCompactPrologue(is_compacting);
611
612 MarkCompactCollector::CollectGarbage();
613
614 MarkCompactEpilogue(is_compacting);
615
616 LOG(ResourceEvent("markcompact", "end"));
617
618 gc_state_ = NOT_IN_GC;
619
620 Shrink();
621
622 Counters::objs_since_last_full.Set(0);
623 context_disposed_pending_ = false;
624}
625
626
627void Heap::MarkCompactPrologue(bool is_compacting) {
628 // At any old GC clear the keyed lookup cache to enable collection of unused
629 // maps.
630 KeyedLookupCache::Clear();
631 ContextSlotCache::Clear();
632 DescriptorLookupCache::Clear();
633
634 CompilationCache::MarkCompactPrologue();
635
636 Top::MarkCompactPrologue(is_compacting);
637 ThreadManager::MarkCompactPrologue(is_compacting);
638
639 if (is_compacting) FlushNumberStringCache();
640}
641
642
643void Heap::MarkCompactEpilogue(bool is_compacting) {
644 Top::MarkCompactEpilogue(is_compacting);
645 ThreadManager::MarkCompactEpilogue(is_compacting);
646}
647
648
649Object* Heap::FindCodeObject(Address a) {
650 Object* obj = code_space_->FindObject(a);
651 if (obj->IsFailure()) {
652 obj = lo_space_->FindObject(a);
653 }
654 ASSERT(!obj->IsFailure());
655 return obj;
656}
657
658
659// Helper class for copying HeapObjects
660class ScavengeVisitor: public ObjectVisitor {
661 public:
662
663 void VisitPointer(Object** p) { ScavengePointer(p); }
664
665 void VisitPointers(Object** start, Object** end) {
666 // Copy all HeapObject pointers in [start, end)
667 for (Object** p = start; p < end; p++) ScavengePointer(p);
668 }
669
670 private:
671 void ScavengePointer(Object** p) {
672 Object* object = *p;
673 if (!Heap::InNewSpace(object)) return;
674 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
675 reinterpret_cast<HeapObject*>(object));
676 }
677};
678
679
680// A queue of pointers and maps of to-be-promoted objects during a
681// scavenge collection.
682class PromotionQueue {
683 public:
684 void Initialize(Address start_address) {
685 front_ = rear_ = reinterpret_cast<HeapObject**>(start_address);
686 }
687
688 bool is_empty() { return front_ <= rear_; }
689
690 void insert(HeapObject* object, Map* map) {
691 *(--rear_) = object;
692 *(--rear_) = map;
693 // Assert no overflow into live objects.
694 ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
695 }
696
697 void remove(HeapObject** object, Map** map) {
698 *object = *(--front_);
699 *map = Map::cast(*(--front_));
700 // Assert no underflow.
701 ASSERT(front_ >= rear_);
702 }
703
704 private:
705 // The front of the queue is higher in memory than the rear.
706 HeapObject** front_;
707 HeapObject** rear_;
708};
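// How the queue above sits in memory: it occupies the still-unused top of
// to space and grows towards lower addresses, two words per entry (object
// first, then its map). After inserting objects A and then B the layout is
// roughly:
//
//   lower addresses                              higher addresses
//   rear_ -> [ map(B) | B | map(A) | A ] <- front_ (ToSpaceHigh() until the
//                                           first remove())
//
// remove() pops at front_, so entries come out in insertion order, and the
// ASSERT in insert() guards against the queue growing down into the objects
// still being allocated at new_space()->top().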
709
710
711// Shared state read by the scavenge collector and set by ScavengeObject.
712static PromotionQueue promotion_queue;
713
714
715#ifdef DEBUG
716// Visitor class to verify pointers in code or data space do not point into
717// new space.
718class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
719 public:
720 void VisitPointers(Object** start, Object**end) {
721 for (Object** current = start; current < end; current++) {
722 if ((*current)->IsHeapObject()) {
723 ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
724 }
725 }
726 }
727};
728
729
730static void VerifyNonPointerSpacePointers() {
731 // Verify that there are no pointers to new space in spaces where we
732 // do not expect them.
733 VerifyNonPointerSpacePointersVisitor v;
734 HeapObjectIterator code_it(Heap::code_space());
735 for (HeapObject* object = code_it.next();
736 object != NULL; object = code_it.next())
737 object->Iterate(&v);
738
739 HeapObjectIterator data_it(Heap::old_data_space());
740 for (HeapObject* object = data_it.next();
741 object != NULL; object = data_it.next())
742 object->Iterate(&v);
743}
744#endif
745
746
747void Heap::Scavenge() {
748#ifdef DEBUG
749 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
750#endif
751
752 gc_state_ = SCAVENGE;
753
754 // Implements Cheney's copying algorithm
755 LOG(ResourceEvent("scavenge", "begin"));
756
757 // Clear descriptor cache.
758 DescriptorLookupCache::Clear();
759
760 // Used for updating survived_since_last_expansion_ at function end.
761 int survived_watermark = PromotedSpaceSize();
762
763 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
764 survived_since_last_expansion_ > new_space_.Capacity()) {
765 // Grow the size of new space if there is room to grow and enough
766 // data has survived scavenge since the last expansion.
767 new_space_.Grow();
768 survived_since_last_expansion_ = 0;
769 }
770
771 // Flip the semispaces. After flipping, to space is empty, from space has
772 // live objects.
773 new_space_.Flip();
774 new_space_.ResetAllocationInfo();
775
776 // We need to sweep newly copied objects which can be either in the
777 // to space or promoted to the old generation. For to-space
778 // objects, we treat the bottom of the to space as a queue. Newly
779 // copied and unswept objects lie between a 'front' mark and the
780 // allocation pointer.
781 //
782 // Promoted objects can go into various old-generation spaces, and
783 // can be allocated internally in the spaces (from the free list).
784 // We treat the top of the to space as a queue of addresses of
785 // promoted objects. The addresses of newly promoted and unswept
786 // objects lie between a 'front' mark and a 'rear' mark that is
787 // updated as a side effect of promoting an object.
788 //
789 // There is guaranteed to be enough room at the top of the to space
790 // for the addresses of promoted objects: every object promoted
791 // frees up its size in bytes from the top of the new space, and
792 // objects are at least one pointer in size.
793 Address new_space_front = new_space_.ToSpaceLow();
794 promotion_queue.Initialize(new_space_.ToSpaceHigh());
795
796 ScavengeVisitor scavenge_visitor;
797 // Copy roots.
798 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
799
800 // Copy objects reachable from the old generation. By definition,
801 // there are no intergenerational pointers in code or data spaces.
802 IterateRSet(old_pointer_space_, &ScavengePointer);
803 IterateRSet(map_space_, &ScavengePointer);
804 lo_space_->IterateRSet(&ScavengePointer);
805
806 // Copy objects reachable from cells by scavenging cell values directly.
807 HeapObjectIterator cell_iterator(cell_space_);
808 for (HeapObject* cell = cell_iterator.next();
809 cell != NULL; cell = cell_iterator.next()) {
810 if (cell->IsJSGlobalPropertyCell()) {
811 Address value_address =
812 reinterpret_cast<Address>(cell) +
813 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
814 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
815 }
816 }
817
818 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
819
820 ScavengeExternalStringTable();
821 ASSERT(new_space_front == new_space_.top());
822
823 // Set age mark.
824 new_space_.set_age_mark(new_space_.top());
825
826 // Update how much has survived scavenge.
827 survived_since_last_expansion_ +=
828 (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
829
830 LOG(ResourceEvent("scavenge", "end"));
831
832 gc_state_ = NOT_IN_GC;
833}
834
835
836void Heap::ScavengeExternalStringTable() {
837 ExternalStringTable::Verify();
838
839 if (ExternalStringTable::new_space_strings_.is_empty()) return;
840
841 Object** start = &ExternalStringTable::new_space_strings_[0];
842 Object** end = start + ExternalStringTable::new_space_strings_.length();
843 Object** last = start;
844
845 for (Object** p = start; p < end; ++p) {
846 ASSERT(Heap::InFromSpace(*p));
847 MapWord first_word = HeapObject::cast(*p)->map_word();
848
849 if (!first_word.IsForwardingAddress()) {
850 // Unreachable external string can be finalized.
851 FinalizeExternalString(String::cast(*p));
852 continue;
853 }
854
855 // String is still reachable.
856 String* target = String::cast(first_word.ToForwardingAddress());
857 ASSERT(target->IsExternalString());
858
859 if (Heap::InNewSpace(target)) {
860 // String is still in new space. Update the table entry.
861 *last = target;
862 ++last;
863 } else {
864 // String got promoted. Move it to the old string list.
865 ExternalStringTable::AddOldString(target);
866 }
867 }
868
869 ASSERT(last <= end);
870 ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
871}
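// The loop above compacts new_space_strings_ in place: 'last' trails 'p',
// keeping only entries that are still external strings in new space, while
// promoted strings move to the old-string list and unreachable ones are
// finalized; ShrinkNewStrings then drops the unused tail.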
872
873
874Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
875 Address new_space_front) {
876 do {
877 ASSERT(new_space_front <= new_space_.top());
878
879 // The addresses new_space_front and new_space_.top() define a
880 // queue of unprocessed copied objects. Process them until the
881 // queue is empty.
882 while (new_space_front < new_space_.top()) {
883 HeapObject* object = HeapObject::FromAddress(new_space_front);
884 object->Iterate(scavenge_visitor);
885 new_space_front += object->Size();
886 }
887
888 // Promote and process all the to-be-promoted objects.
889 while (!promotion_queue.is_empty()) {
890 HeapObject* source;
891 Map* map;
892 promotion_queue.remove(&source, &map);
893 // Copy the from-space object to its new location (given by the
894 // forwarding address) and fix its map.
895 HeapObject* target = source->map_word().ToForwardingAddress();
896 CopyBlock(reinterpret_cast<Object**>(target->address()),
897 reinterpret_cast<Object**>(source->address()),
898 source->SizeFromMap(map));
899 target->set_map(map);
900
901#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
902 // Update NewSpace stats if necessary.
903 RecordCopiedObject(target);
904#endif
905 // Visit the newly copied object for pointers to new space.
906 target->Iterate(scavenge_visitor);
907 UpdateRSet(target);
908 }
909
910 // Take another spin if there are now unswept objects in new space
911 // (there are currently no more unswept promoted objects).
912 } while (new_space_front < new_space_.top());
913
914 return new_space_front;
915}
916
917
918void Heap::ClearRSetRange(Address start, int size_in_bytes) {
919 uint32_t start_bit;
920 Address start_word_address =
921 Page::ComputeRSetBitPosition(start, 0, &start_bit);
922 uint32_t end_bit;
923 Address end_word_address =
924 Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
925 0,
926 &end_bit);
927
928 // We want to clear the bits in the starting word starting with the
929 // first bit, and in the ending word up to and including the last
930 // bit. Build a pair of bitmasks to do that.
931 uint32_t start_bitmask = start_bit - 1;
932 uint32_t end_bitmask = ~((end_bit << 1) - 1);
933
934 // If the start address and end address are the same, we mask that
935 // word once, otherwise mask the starting and ending word
936 // separately and all the ones in between.
937 if (start_word_address == end_word_address) {
938 Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
939 } else {
940 Memory::uint32_at(start_word_address) &= start_bitmask;
941 Memory::uint32_at(end_word_address) &= end_bitmask;
942 start_word_address += kIntSize;
943 memset(start_word_address, 0, end_word_address - start_word_address);
944 }
945}
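// A small worked example of the masking above, assuming start_bit and
// end_bit are single-bit masks as produced by ComputeRSetBitPosition: if
// start_bit == (1 << 3) and end_bit == (1 << 5), then
//   start_bitmask == 0x00000007  (keeps bits 0..2)
//   end_bitmask   == 0xFFFFFFC0  (keeps bits 6..31)
// so ANDing a word with (start_bitmask | end_bitmask) clears exactly
// bits 3..5, the remembered set bits for the cleared address range.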
946
947
948class UpdateRSetVisitor: public ObjectVisitor {
949 public:
950
951 void VisitPointer(Object** p) {
952 UpdateRSet(p);
953 }
954
955 void VisitPointers(Object** start, Object** end) {
956 // Update a store into slots [start, end), used (a) to update remembered
957 // set when promoting a young object to old space or (b) to rebuild
958 // remembered sets after a mark-compact collection.
959 for (Object** p = start; p < end; p++) UpdateRSet(p);
960 }
961 private:
962
963 void UpdateRSet(Object** p) {
964 // The remembered set should not be set. It should be clear for objects
965 // newly copied to old space, and it is cleared before rebuilding in the
966 // mark-compact collector.
967 ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
968 if (Heap::InNewSpace(*p)) {
969 Page::SetRSet(reinterpret_cast<Address>(p), 0);
970 }
971 }
972};
973
974
975int Heap::UpdateRSet(HeapObject* obj) {
976 ASSERT(!InNewSpace(obj));
977 // Special handling of fixed arrays to iterate the body based on the start
978 // address and offset. Just iterating the pointers as in UpdateRSetVisitor
979 // will not work because Page::SetRSet needs to have the start of the
980 // object for large object pages.
981 if (obj->IsFixedArray()) {
982 FixedArray* array = FixedArray::cast(obj);
983 int length = array->length();
984 for (int i = 0; i < length; i++) {
985 int offset = FixedArray::kHeaderSize + i * kPointerSize;
986 ASSERT(!Page::IsRSetSet(obj->address(), offset));
987 if (Heap::InNewSpace(array->get(i))) {
988 Page::SetRSet(obj->address(), offset);
989 }
990 }
991 } else if (!obj->IsCode()) {
992 // Skip code object, we know it does not contain inter-generational
993 // pointers.
994 UpdateRSetVisitor v;
995 obj->Iterate(&v);
996 }
997 return obj->Size();
998}
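// A set remembered-set bit marks a word in old space (or in a large object)
// that may hold a pointer into new space, so a scavenge only has to visit
// slots whose bits are set (see the IterateRSet calls in Scavenge above).
// UpdateRSet records exactly those slots for a freshly promoted object, and
// RebuildRSets below recomputes the bits from scratch after a compacting
// collection has moved objects around.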
999
1000
1001void Heap::RebuildRSets() {
1002 // By definition, we do not care about remembered set bits in code,
1003 // data, or cell spaces.
1004 map_space_->ClearRSet();
1005 RebuildRSets(map_space_);
1006
1007 old_pointer_space_->ClearRSet();
1008 RebuildRSets(old_pointer_space_);
1009
1010 Heap::lo_space_->ClearRSet();
1011 RebuildRSets(lo_space_);
1012}
1013
1014
1015void Heap::RebuildRSets(PagedSpace* space) {
1016 HeapObjectIterator it(space);
1017 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1018 Heap::UpdateRSet(obj);
1019}
1020
1021
1022void Heap::RebuildRSets(LargeObjectSpace* space) {
1023 LargeObjectIterator it(space);
1024 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1025 Heap::UpdateRSet(obj);
1026}
1027
1028
1029#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1030void Heap::RecordCopiedObject(HeapObject* obj) {
1031 bool should_record = false;
1032#ifdef DEBUG
1033 should_record = FLAG_heap_stats;
1034#endif
1035#ifdef ENABLE_LOGGING_AND_PROFILING
1036 should_record = should_record || FLAG_log_gc;
1037#endif
1038 if (should_record) {
1039 if (new_space_.Contains(obj)) {
1040 new_space_.RecordAllocation(obj);
1041 } else {
1042 new_space_.RecordPromotion(obj);
1043 }
1044 }
1045}
1046#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1047
1048
1049
1050HeapObject* Heap::MigrateObject(HeapObject* source,
1051 HeapObject* target,
1052 int size) {
1053 // Copy the content of source to target.
1054 CopyBlock(reinterpret_cast<Object**>(target->address()),
1055 reinterpret_cast<Object**>(source->address()),
1056 size);
1057
1058 // Set the forwarding address.
1059 source->set_map_word(MapWord::FromForwardingAddress(target));
1060
1061#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1062 // Update NewSpace stats if necessary.
1063 RecordCopiedObject(target);
1064#endif
1065
1066 return target;
1067}
1068
1069
1070static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
1071 STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0);
1072 ASSERT(object->map() == map);
1073 InstanceType type = map->instance_type();
1074 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return false;
1075 ASSERT(object->IsString() && !object->IsSymbol());
1076 return ConsString::cast(object)->unchecked_second() == Heap::empty_string();
1077}
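// Rationale for the shortcut: a ConsString whose second half is the empty
// string (e.g. the result of flattening) is just a wrapper around its first
// half, so the scavenger can forward references straight to
// unchecked_first() instead of copying the wrapper; see ScavengeObjectSlow
// below.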
1078
1079
1080void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1081 ASSERT(InFromSpace(object));
1082 MapWord first_word = object->map_word();
1083 ASSERT(!first_word.IsForwardingAddress());
1084
1085 // Optimization: Bypass flattened ConsString objects.
1086 if (IsShortcutCandidate(object, first_word.ToMap())) {
1087 object = HeapObject::cast(ConsString::cast(object)->unchecked_first());
1088 *p = object;
1089 // After patching *p we have to repeat the checks that object is in the
1090 // active semispace of the young generation and not already copied.
1091 if (!InNewSpace(object)) return;
1092 first_word = object->map_word();
1093 if (first_word.IsForwardingAddress()) {
1094 *p = first_word.ToForwardingAddress();
1095 return;
1096 }
1097 }
1098
1099 int object_size = object->SizeFromMap(first_word.ToMap());
1100 // We rely on live objects in new space to be at least two pointers,
1101 // so we can store the from-space address and map pointer of promoted
1102 // objects in the to space.
1103 ASSERT(object_size >= 2 * kPointerSize);
1104
1105 // If the object should be promoted, we try to copy it to old space.
1106 if (ShouldBePromoted(object->address(), object_size)) {
1107 Object* result;
1108 if (object_size > MaxObjectSizeInPagedSpace()) {
1109 result = lo_space_->AllocateRawFixedArray(object_size);
1110 if (!result->IsFailure()) {
1111 // Save the from-space object pointer and its map pointer at the
1112 // top of the to space to be swept and copied later. Write the
1113 // forwarding address over the map word of the from-space
1114 // object.
1115 HeapObject* target = HeapObject::cast(result);
1116 promotion_queue.insert(object, first_word.ToMap());
1117 object->set_map_word(MapWord::FromForwardingAddress(target));
1118
1119 // Give the space allocated for the result a proper map by
1120 // treating it as a free list node (not linked into the free
1121 // list).
1122 FreeListNode* node = FreeListNode::FromAddress(target->address());
1123 node->set_size(object_size);
1124
1125 *p = target;
1126 return;
1127 }
1128 } else {
1129 OldSpace* target_space = Heap::TargetSpace(object);
1130 ASSERT(target_space == Heap::old_pointer_space_ ||
1131 target_space == Heap::old_data_space_);
1132 result = target_space->AllocateRaw(object_size);
1133 if (!result->IsFailure()) {
1134 HeapObject* target = HeapObject::cast(result);
1135 if (target_space == Heap::old_pointer_space_) {
1136 // Save the from-space object pointer and its map pointer at the
1137 // top of the to space to be swept and copied later. Write the
1138 // forwarding address over the map word of the from-space
1139 // object.
1140 promotion_queue.insert(object, first_word.ToMap());
1141 object->set_map_word(MapWord::FromForwardingAddress(target));
1142
1143 // Give the space allocated for the result a proper map by
1144 // treating it as a free list node (not linked into the free
1145 // list).
1146 FreeListNode* node = FreeListNode::FromAddress(target->address());
1147 node->set_size(object_size);
1148
1149 *p = target;
1150 } else {
1151 // Objects promoted to the data space can be copied immediately
1152 // and not revisited---we will never sweep that space for
1153 // pointers and the copied objects do not contain pointers to
1154 // new space objects.
1155 *p = MigrateObject(object, target, object_size);
1156#ifdef DEBUG
1157 VerifyNonPointerSpacePointersVisitor v;
1158 (*p)->Iterate(&v);
1159#endif
1160 }
1161 return;
1162 }
1163 }
1164 }
1165 // The object should remain in new space or the old space allocation failed.
1166 Object* result = new_space_.AllocateRaw(object_size);
1167 // Failed allocation at this point is utterly unexpected.
1168 ASSERT(!result->IsFailure());
1169 *p = MigrateObject(object, HeapObject::cast(result), object_size);
1170}
1171
1172
1173void Heap::ScavengePointer(HeapObject** p) {
1174 ScavengeObject(p, *p);
1175}
1176
1177
1178Object* Heap::AllocatePartialMap(InstanceType instance_type,
1179 int instance_size) {
1180 Object* result = AllocateRawMap();
1181 if (result->IsFailure()) return result;
1182
1183 // Map::cast cannot be used due to uninitialized map field.
1184 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1185 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1186 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
1187 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
1188 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
1189 return result;
1190}
1191
1192
1193Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1194 Object* result = AllocateRawMap();
1195 if (result->IsFailure()) return result;
1196
1197 Map* map = reinterpret_cast<Map*>(result);
1198 map->set_map(meta_map());
1199 map->set_instance_type(instance_type);
1200 map->set_prototype(null_value());
1201 map->set_constructor(null_value());
1202 map->set_instance_size(instance_size);
1203 map->set_inobject_properties(0);
1204 map->set_pre_allocated_property_fields(0);
1205 map->set_instance_descriptors(empty_descriptor_array());
1206 map->set_code_cache(empty_fixed_array());
1207 map->set_unused_property_fields(0);
1208 map->set_bit_field(0);
1209 map->set_bit_field2(1 << Map::kIsExtensible);
1210
1211 // If the map object is aligned fill the padding area with Smi 0 objects.
1212 if (Map::kPadStart < Map::kSize) {
1213 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1214 0,
1215 Map::kSize - Map::kPadStart);
1216 }
1217 return map;
1218}
1219
1220
1221const Heap::StringTypeTable Heap::string_type_table[] = {
1222#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1223 {type, size, k##camel_name##MapRootIndex},
1224 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1225#undef STRING_TYPE_ELEMENT
1226};
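// Each STRING_TYPE_LIST entry (type, size, name, camel_name) becomes a row
// { type, size, k<CamelName>MapRootIndex } in the table above;
// CreateInitialMaps walks the table and installs one map per string
// representation into roots_. The constant_symbol_table and struct_table
// below are generated the same way from SYMBOL_LIST and STRUCT_LIST.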
1227
1228
1229const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1230#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1231 {contents, k##name##RootIndex},
1232 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1233#undef CONSTANT_SYMBOL_ELEMENT
1234};
1235
1236
1237const Heap::StructTable Heap::struct_table[] = {
1238#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1239 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1240 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1241#undef STRUCT_TABLE_ELEMENT
1242};
1243
1244
1245bool Heap::CreateInitialMaps() {
1246 Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1247 if (obj->IsFailure()) return false;
1248 // Map::cast cannot be used due to uninitialized map field.
1249 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1250 set_meta_map(new_meta_map);
1251 new_meta_map->set_map(new_meta_map);
1252
1253 obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
1254 if (obj->IsFailure()) return false;
1255 set_fixed_array_map(Map::cast(obj));
1256
1257 obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1258 if (obj->IsFailure()) return false;
1259 set_oddball_map(Map::cast(obj));
1260
1261 // Allocate the empty array
1262 obj = AllocateEmptyFixedArray();
1263 if (obj->IsFailure()) return false;
1264 set_empty_fixed_array(FixedArray::cast(obj));
1265
1266 obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1267 if (obj->IsFailure()) return false;
1268 set_null_value(obj);
1269
1270 // Allocate the empty descriptor array.
1271 obj = AllocateEmptyFixedArray();
1272 if (obj->IsFailure()) return false;
1273 set_empty_descriptor_array(DescriptorArray::cast(obj));
1274
1275 // Fix the instance_descriptors for the existing maps.
1276 meta_map()->set_instance_descriptors(empty_descriptor_array());
1277 meta_map()->set_code_cache(empty_fixed_array());
1278
1279 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1280 fixed_array_map()->set_code_cache(empty_fixed_array());
1281
1282 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1283 oddball_map()->set_code_cache(empty_fixed_array());
1284
1285 // Fix prototype object for existing maps.
1286 meta_map()->set_prototype(null_value());
1287 meta_map()->set_constructor(null_value());
1288
1289 fixed_array_map()->set_prototype(null_value());
1290 fixed_array_map()->set_constructor(null_value());
1291
1292 oddball_map()->set_prototype(null_value());
1293 oddball_map()->set_constructor(null_value());
1294
1295 obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1296 if (obj->IsFailure()) return false;
1297 set_heap_number_map(Map::cast(obj));
1298
1299 obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1300 if (obj->IsFailure()) return false;
1301 set_proxy_map(Map::cast(obj));
1302
1303 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1304 const StringTypeTable& entry = string_type_table[i];
1305 obj = AllocateMap(entry.type, entry.size);
1306 if (obj->IsFailure()) return false;
1307 roots_[entry.index] = Map::cast(obj);
1308 }
1309
1310 obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize);
1311 if (obj->IsFailure()) return false;
1312 set_undetectable_string_map(Map::cast(obj));
1313 Map::cast(obj)->set_is_undetectable();
1314
1315 obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
1316 if (obj->IsFailure()) return false;
1317 set_undetectable_ascii_string_map(Map::cast(obj));
1318 Map::cast(obj)->set_is_undetectable();
1319
1320 obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
1321 if (obj->IsFailure()) return false;
1322 set_byte_array_map(Map::cast(obj));
1323
1324 obj = AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
1325 if (obj->IsFailure()) return false;
1326 set_pixel_array_map(Map::cast(obj));
1327
1328 obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1329 ExternalArray::kAlignedSize);
1330 if (obj->IsFailure()) return false;
1331 set_external_byte_array_map(Map::cast(obj));
1332
1333 obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1334 ExternalArray::kAlignedSize);
1335 if (obj->IsFailure()) return false;
1336 set_external_unsigned_byte_array_map(Map::cast(obj));
1337
1338 obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1339 ExternalArray::kAlignedSize);
1340 if (obj->IsFailure()) return false;
1341 set_external_short_array_map(Map::cast(obj));
1342
1343 obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1344 ExternalArray::kAlignedSize);
1345 if (obj->IsFailure()) return false;
1346 set_external_unsigned_short_array_map(Map::cast(obj));
1347
1348 obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1349 ExternalArray::kAlignedSize);
1350 if (obj->IsFailure()) return false;
1351 set_external_int_array_map(Map::cast(obj));
1352
1353 obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1354 ExternalArray::kAlignedSize);
1355 if (obj->IsFailure()) return false;
1356 set_external_unsigned_int_array_map(Map::cast(obj));
1357
1358 obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1359 ExternalArray::kAlignedSize);
1360 if (obj->IsFailure()) return false;
1361 set_external_float_array_map(Map::cast(obj));
1362
1363 obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
1364 if (obj->IsFailure()) return false;
1365 set_code_map(Map::cast(obj));
1366
1367 obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1368 JSGlobalPropertyCell::kSize);
1369 if (obj->IsFailure()) return false;
1370 set_global_property_cell_map(Map::cast(obj));
1371
1372 obj = AllocateMap(FILLER_TYPE, kPointerSize);
1373 if (obj->IsFailure()) return false;
1374 set_one_pointer_filler_map(Map::cast(obj));
1375
1376 obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1377 if (obj->IsFailure()) return false;
1378 set_two_pointer_filler_map(Map::cast(obj));
1379
1380 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1381 const StructTable& entry = struct_table[i];
1382 obj = AllocateMap(entry.type, entry.size);
1383 if (obj->IsFailure()) return false;
1384 roots_[entry.index] = Map::cast(obj);
1385 }
1386
1387 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1388 if (obj->IsFailure()) return false;
1389 set_hash_table_map(Map::cast(obj));
1390
1391 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1392 if (obj->IsFailure()) return false;
1393 set_context_map(Map::cast(obj));
1394
1395 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1396 if (obj->IsFailure()) return false;
1397 set_catch_context_map(Map::cast(obj));
1398
1399 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1400 if (obj->IsFailure()) return false;
1401 set_global_context_map(Map::cast(obj));
1402
1403 obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
1404 if (obj->IsFailure()) return false;
1405 set_boilerplate_function_map(Map::cast(obj));
1406
1407 obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
1408 if (obj->IsFailure()) return false;
1409 set_shared_function_info_map(Map::cast(obj));
1410
1411 ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
1412 return true;
1413}
1414
1415
1416Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
1417 // Statically ensure that it is safe to allocate heap numbers in paged
1418 // spaces.
1419 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1420 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1421
1422 Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1423 if (result->IsFailure()) return result;
1424
1425 HeapObject::cast(result)->set_map(heap_number_map());
1426 HeapNumber::cast(result)->set_value(value);
1427 return result;
1428}
1429
1430
1431Object* Heap::AllocateHeapNumber(double value) {
1432 // Use general version, if we're forced to always allocate.
1433 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1434
1435 // This version of AllocateHeapNumber is optimized for
1436 // allocation in new space.
1437 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1438 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
1439 Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
1440 if (result->IsFailure()) return result;
1441 HeapObject::cast(result)->set_map(heap_number_map());
1442 HeapNumber::cast(result)->set_value(value);
1443 return result;
1444}
1445
1446
1447Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1448 Object* result = AllocateRawCell();
1449 if (result->IsFailure()) return result;
1450 HeapObject::cast(result)->set_map(global_property_cell_map());
1451 JSGlobalPropertyCell::cast(result)->set_value(value);
1452 return result;
1453}
1454
1455
1456Object* Heap::CreateOddball(Map* map,
1457 const char* to_string,
1458 Object* to_number) {
1459 Object* result = Allocate(map, OLD_DATA_SPACE);
1460 if (result->IsFailure()) return result;
1461 return Oddball::cast(result)->Initialize(to_string, to_number);
1462}
1463
1464
1465bool Heap::CreateApiObjects() {
1466 Object* obj;
1467
1468 obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1469 if (obj->IsFailure()) return false;
1470 set_neander_map(Map::cast(obj));
1471
1472 obj = Heap::AllocateJSObjectFromMap(neander_map());
1473 if (obj->IsFailure()) return false;
1474 Object* elements = AllocateFixedArray(2);
1475 if (elements->IsFailure()) return false;
1476 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1477 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1478 set_message_listeners(JSObject::cast(obj));
1479
1480 return true;
1481}
1482
1483
1484void Heap::CreateCEntryStub() {
1485 CEntryStub stub(1);
1486 set_c_entry_code(*stub.GetCode());
1487}
1488
1489
1490#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
1491void Heap::CreateRegExpCEntryStub() {
1492 RegExpCEntryStub stub;
1493 set_re_c_entry_code(*stub.GetCode());
1494}
1495#endif
1496
1497
1498void Heap::CreateCEntryDebugBreakStub() {
1499 CEntryDebugBreakStub stub;
1500 set_c_entry_debug_break_code(*stub.GetCode());
1501}
1502
1503
1504void Heap::CreateJSEntryStub() {
1505 JSEntryStub stub;
1506 set_js_entry_code(*stub.GetCode());
1507}
1508
1509
1510void Heap::CreateJSConstructEntryStub() {
1511 JSConstructEntryStub stub;
1512 set_js_construct_entry_code(*stub.GetCode());
1513}
1514
1515
1516void Heap::CreateFixedStubs() {
1517 // Here we create roots for fixed stubs. They are needed at GC
1518 // for cooking and uncooking (check out frames.cc).
1519 // This eliminates the need for doing dictionary lookup in the
1520 // stub cache for these stubs.
1521 HandleScope scope;
1522 // gcc-4.4 has problem generating correct code of following snippet:
1523 // { CEntryStub stub;
1524 // c_entry_code_ = *stub.GetCode();
1525 // }
1526 // { CEntryDebugBreakStub stub;
1527 // c_entry_debug_break_code_ = *stub.GetCode();
1528 // }
1529 // To workaround the problem, make separate functions without inlining.
1530 Heap::CreateCEntryStub();
1531 Heap::CreateCEntryDebugBreakStub();
1532 Heap::CreateJSEntryStub();
1533 Heap::CreateJSConstructEntryStub();
1534#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
1535 Heap::CreateRegExpCEntryStub();
1536#endif
1537}
1538
1539
1540bool Heap::CreateInitialObjects() {
1541 Object* obj;
1542
1543 // The -0 value must be set before NumberFromDouble works.
1544 obj = AllocateHeapNumber(-0.0, TENURED);
1545 if (obj->IsFailure()) return false;
1546 set_minus_zero_value(obj);
1547 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1548
1549 obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1550 if (obj->IsFailure()) return false;
1551 set_nan_value(obj);
1552
1553 obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1554 if (obj->IsFailure()) return false;
1555 set_undefined_value(obj);
1556 ASSERT(!InNewSpace(undefined_value()));
1557
1558 // Allocate initial symbol table.
1559 obj = SymbolTable::Allocate(kInitialSymbolTableSize);
1560 if (obj->IsFailure()) return false;
1561 // Don't use set_symbol_table() due to asserts.
1562 roots_[kSymbolTableRootIndex] = obj;
1563
1564 // Assign the print strings for oddballs after creating symbol table.
1565 Object* symbol = LookupAsciiSymbol("undefined");
1566 if (symbol->IsFailure()) return false;
1567 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
1568 Oddball::cast(undefined_value())->set_to_number(nan_value());
1569
1570 // Assign the print strings for oddballs after creating symbol table.
1571 symbol = LookupAsciiSymbol("null");
1572 if (symbol->IsFailure()) return false;
1573 Oddball::cast(null_value())->set_to_string(String::cast(symbol));
1574 Oddball::cast(null_value())->set_to_number(Smi::FromInt(0));
1575
1576 // Allocate the null_value
1577 obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
1578 if (obj->IsFailure()) return false;
1579
1580 obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
1581 if (obj->IsFailure()) return false;
1582 set_true_value(obj);
1583
1584 obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
1585 if (obj->IsFailure()) return false;
1586 set_false_value(obj);
1587
1588 obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
1589 if (obj->IsFailure()) return false;
1590 set_the_hole_value(obj);
1591
1592 obj = CreateOddball(
1593 oddball_map(), "no_interceptor_result_sentinel", Smi::FromInt(-2));
1594 if (obj->IsFailure()) return false;
1595 set_no_interceptor_result_sentinel(obj);
1596
1597 obj = CreateOddball(oddball_map(), "termination_exception", Smi::FromInt(-3));
1598 if (obj->IsFailure()) return false;
1599 set_termination_exception(obj);
1600
1601 // Allocate the empty string.
1602 obj = AllocateRawAsciiString(0, TENURED);
1603 if (obj->IsFailure()) return false;
1604 set_empty_string(String::cast(obj));
1605
1606 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
1607 obj = LookupAsciiSymbol(constant_symbol_table[i].contents);
1608 if (obj->IsFailure()) return false;
1609 roots_[constant_symbol_table[i].index] = String::cast(obj);
1610 }
1611
1612 // Allocate the hidden symbol which is used to identify the hidden properties
1613 // in JSObjects. The hash code has a special value so that it will not match
1614 // the empty string when searching for the property. It cannot be part of the
1615 // loop above because it needs to be allocated manually with the special
1616 // hash code in place. The hash code for the hidden_symbol is zero to ensure
1617 // that it will always be at the first entry in property descriptors.
1618 obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
1619 if (obj->IsFailure()) return false;
1620 hidden_symbol_ = String::cast(obj);
1621
1622 // Allocate the proxy for __proto__.
1623 obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
1624 if (obj->IsFailure()) return false;
1625 set_prototype_accessors(Proxy::cast(obj));
1626
1627 // Allocate the code_stubs dictionary. The initial size is set to avoid
1628 // expanding the dictionary during bootstrapping.
1629 obj = NumberDictionary::Allocate(128);
1630 if (obj->IsFailure()) return false;
1631 set_code_stubs(NumberDictionary::cast(obj));
1632
1633 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
1634 // is set to avoid expanding the dictionary during bootstrapping.
1635 obj = NumberDictionary::Allocate(64);
1636 if (obj->IsFailure()) return false;
1637 set_non_monomorphic_cache(NumberDictionary::cast(obj));
1638
1639 CreateFixedStubs();
1640
Leon Clarkee46be812010-01-19 14:06:41 +00001641 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001642
1643 // Allocate cache for single character strings.
1644 obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
1645 if (obj->IsFailure()) return false;
1646 set_single_character_string_cache(FixedArray::cast(obj));
1647
1648 // Allocate cache for external strings pointing to native source code.
1649 obj = AllocateFixedArray(Natives::GetBuiltinsCount());
1650 if (obj->IsFailure()) return false;
1651 set_natives_source_cache(FixedArray::cast(obj));
1652
1653 // Handling of script id generation is in Factory::NewScript.
1654 set_last_script_id(undefined_value());
1655
1656 // Initialize keyed lookup cache.
1657 KeyedLookupCache::Clear();
1658
1659 // Initialize context slot cache.
1660 ContextSlotCache::Clear();
1661
1662 // Initialize descriptor cache.
1663 DescriptorLookupCache::Clear();
1664
1665 // Initialize compilation cache.
1666 CompilationCache::Clear();
1667
1668 return true;
1669}
1670
1671
Leon Clarkee46be812010-01-19 14:06:41 +00001672Object* Heap::InitializeNumberStringCache() {
1673  // Compute the size of the number string cache based on the max heap size:
1674  // number_string_cache_size = max_semispace_size_ / 512, clamped to the
1675  // range [32, 16 KB]; e.g. max_semispace_size_ == 8 MB => 16 KB entries.
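  // Worked example (illustrative only): with max_semispace_size_ == 2 MB the
  // division gives 4096 entries, which already lies within [32, 16 KB], so the
  // backing FixedArray below is allocated with 2 * 4096 == 8192 slots
  // (key/value pairs).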
1676 int number_string_cache_size = max_semispace_size_ / 512;
1677 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
1678 Object* obj = AllocateFixedArray(number_string_cache_size * 2);
1679 if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
1680 return obj;
1681}
1682
1683
1684void Heap::FlushNumberStringCache() {
1685 // Flush the number to string cache.
1686 int len = number_string_cache()->length();
1687 for (int i = 0; i < len; i++) {
1688 number_string_cache()->set_undefined(i);
1689 }
1690}
1691
1692
Steve Blocka7e24c12009-10-30 11:49:00 +00001693static inline int double_get_hash(double d) {
1694 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00001695 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00001696}
1697
1698
1699static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00001700 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00001701}
1702
1703
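// Layout of the number string cache (as used by the two functions below): a
// flat FixedArray of (key, value) pairs, where the number key of bucket h is
// stored at index 2 * h and the cached string at index 2 * h + 1. The mask
// (length() >> 1) - 1 assumes the bucket count is a power of two, which the
// sizing in InitializeNumberStringCache() is expected to provide.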
Steve Blocka7e24c12009-10-30 11:49:00 +00001704Object* Heap::GetNumberStringCache(Object* number) {
1705 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00001706 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00001707 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00001708 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001709 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00001710 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001711 }
1712 Object* key = number_string_cache()->get(hash * 2);
1713 if (key == number) {
1714 return String::cast(number_string_cache()->get(hash * 2 + 1));
1715 } else if (key->IsHeapNumber() &&
1716 number->IsHeapNumber() &&
1717 key->Number() == number->Number()) {
1718 return String::cast(number_string_cache()->get(hash * 2 + 1));
1719 }
1720 return undefined_value();
1721}
1722
1723
1724void Heap::SetNumberStringCache(Object* number, String* string) {
1725 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00001726 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00001727 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00001728 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001729 number_string_cache()->set(hash * 2, number, SKIP_WRITE_BARRIER);
1730 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00001731 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001732 number_string_cache()->set(hash * 2, number);
1733 }
1734 number_string_cache()->set(hash * 2 + 1, string);
1735}
1736
1737
1738Object* Heap::SmiOrNumberFromDouble(double value,
1739 bool new_object,
1740 PretenureFlag pretenure) {
1741 // We need to distinguish the minus zero value and this cannot be
1742 // done after conversion to int. Doing this by comparing bit
1743 // patterns is faster than using fpclassify() et al.
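  // For reference: in IEEE 754, +0.0 is the all-zero bit pattern and -0.0
  // differs only in the sign bit (0x8000000000000000), so comparing the raw
  // 64-bit representations tells them apart even though -0.0 == 0.0.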
1744 static const DoubleRepresentation plus_zero(0.0);
1745 static const DoubleRepresentation minus_zero(-0.0);
1746 static const DoubleRepresentation nan(OS::nan_value());
1747 ASSERT(minus_zero_value() != NULL);
1748 ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));
1749
1750 DoubleRepresentation rep(value);
1751 if (rep.bits == plus_zero.bits) return Smi::FromInt(0); // not uncommon
1752 if (rep.bits == minus_zero.bits) {
1753 return new_object ? AllocateHeapNumber(-0.0, pretenure)
1754 : minus_zero_value();
1755 }
1756 if (rep.bits == nan.bits) {
1757 return new_object
1758 ? AllocateHeapNumber(OS::nan_value(), pretenure)
1759 : nan_value();
1760 }
1761
1762 // Try to represent the value as a tagged small integer.
1763 int int_value = FastD2I(value);
1764 if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
1765 return Smi::FromInt(int_value);
1766 }
1767
1768 // Materialize the value in the heap.
1769 return AllocateHeapNumber(value, pretenure);
1770}
1771
1772
1773Object* Heap::NumberToString(Object* number) {
1774 Object* cached = GetNumberStringCache(number);
1775 if (cached != undefined_value()) {
1776 return cached;
1777 }
1778
1779 char arr[100];
1780 Vector<char> buffer(arr, ARRAY_SIZE(arr));
1781 const char* str;
1782 if (number->IsSmi()) {
1783 int num = Smi::cast(number)->value();
1784 str = IntToCString(num, buffer);
1785 } else {
1786 double num = HeapNumber::cast(number)->value();
1787 str = DoubleToCString(num, buffer);
1788 }
1789 Object* result = AllocateStringFromAscii(CStrVector(str));
1790
1791 if (!result->IsFailure()) {
1792 SetNumberStringCache(number, String::cast(result));
1793 }
1794 return result;
1795}
1796
1797
Steve Block3ce2e202009-11-05 08:53:23 +00001798Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
1799 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
1800}
1801
1802
1803Heap::RootListIndex Heap::RootIndexForExternalArrayType(
1804 ExternalArrayType array_type) {
1805 switch (array_type) {
1806 case kExternalByteArray:
1807 return kExternalByteArrayMapRootIndex;
1808 case kExternalUnsignedByteArray:
1809 return kExternalUnsignedByteArrayMapRootIndex;
1810 case kExternalShortArray:
1811 return kExternalShortArrayMapRootIndex;
1812 case kExternalUnsignedShortArray:
1813 return kExternalUnsignedShortArrayMapRootIndex;
1814 case kExternalIntArray:
1815 return kExternalIntArrayMapRootIndex;
1816 case kExternalUnsignedIntArray:
1817 return kExternalUnsignedIntArrayMapRootIndex;
1818 case kExternalFloatArray:
1819 return kExternalFloatArrayMapRootIndex;
1820 default:
1821 UNREACHABLE();
1822 return kUndefinedValueRootIndex;
1823 }
1824}
1825
1826
Steve Blocka7e24c12009-10-30 11:49:00 +00001827Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
1828 return SmiOrNumberFromDouble(value,
1829 true /* number object must be new */,
1830 pretenure);
1831}
1832
1833
1834Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
1835 return SmiOrNumberFromDouble(value,
1836 false /* use preallocated NaN, -0.0 */,
1837 pretenure);
1838}
1839
1840
1841Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
1842 // Statically ensure that it is safe to allocate proxies in paged spaces.
1843 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
1844 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1845 Object* result = Allocate(proxy_map(), space);
1846 if (result->IsFailure()) return result;
1847
1848 Proxy::cast(result)->set_proxy(proxy);
1849 return result;
1850}
1851
1852
1853Object* Heap::AllocateSharedFunctionInfo(Object* name) {
1854 Object* result = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
1855 if (result->IsFailure()) return result;
1856
1857 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
1858 share->set_name(name);
1859 Code* illegal = Builtins::builtin(Builtins::Illegal);
1860 share->set_code(illegal);
1861 Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
1862 share->set_construct_stub(construct_stub);
1863 share->set_expected_nof_properties(0);
1864 share->set_length(0);
1865 share->set_formal_parameter_count(0);
1866 share->set_instance_class_name(Object_symbol());
1867 share->set_function_data(undefined_value());
1868 share->set_script(undefined_value());
1869 share->set_start_position_and_type(0);
1870 share->set_debug_info(undefined_value());
1871 share->set_inferred_name(empty_string());
1872 share->set_compiler_hints(0);
1873 share->set_this_property_assignments_count(0);
1874 share->set_this_property_assignments(undefined_value());
1875 return result;
1876}
1877
1878
Steve Blockd0582a62009-12-15 09:54:21 +00001879// Returns true for a character in a range. Both limits are inclusive.
1880static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
1881  // This makes use of unsigned wraparound.
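  // Illustration: if character < from, the unsigned subtraction wraps around
  // to a value close to 2^32, which is necessarily larger than to - from, so
  // this single comparison covers both bounds of the [from, to] range.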
1882 return character - from <= to - from;
1883}
1884
1885
1886static inline Object* MakeOrFindTwoCharacterString(uint32_t c1, uint32_t c2) {
1887 String* symbol;
1888 // Numeric strings have a different hash algorithm not known by
1889 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
1890 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
1891 Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
1892 return symbol;
1893  // Now that we know the length is 2, we might as well make use of that
1894  // fact when building the new string.
1895 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
1896 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
1897 Object* result = Heap::AllocateRawAsciiString(2);
1898 if (result->IsFailure()) return result;
1899 char* dest = SeqAsciiString::cast(result)->GetChars();
1900 dest[0] = c1;
1901 dest[1] = c2;
1902 return result;
1903 } else {
1904 Object* result = Heap::AllocateRawTwoByteString(2);
1905 if (result->IsFailure()) return result;
1906 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
1907 dest[0] = c1;
1908 dest[1] = c2;
1909 return result;
1910 }
1911}
1912
1913
Steve Blocka7e24c12009-10-30 11:49:00 +00001914Object* Heap::AllocateConsString(String* first, String* second) {
1915 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00001916 if (first_length == 0) {
1917 return second;
1918 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001919
1920 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00001921 if (second_length == 0) {
1922 return first;
1923 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001924
1925 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00001926
1927 // Optimization for 2-byte strings often used as keys in a decompression
1928 // dictionary. Check whether we already have the string in the symbol
1929  // table to prevent creation of many unnecessary strings.
1930 if (length == 2) {
1931 unsigned c1 = first->Get(0);
1932 unsigned c2 = second->Get(0);
1933 return MakeOrFindTwoCharacterString(c1, c2);
1934 }
1935
Steve Blocka7e24c12009-10-30 11:49:00 +00001936 bool is_ascii = first->IsAsciiRepresentation()
1937 && second->IsAsciiRepresentation();
1938
1939 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00001940 // of the new cons string is too large.
1941 if (length > String::kMaxLength || length < 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001942 Top::context()->mark_out_of_memory();
1943 return Failure::OutOfMemoryException();
1944 }
1945
1946 // If the resulting string is small make a flat string.
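  // (Presumably the rationale: for strings below kMinNonFlatLength, copying
  // the characters eagerly is cheaper than allocating a ConsString that would
  // have to be flattened later anyway.)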
1947 if (length < String::kMinNonFlatLength) {
1948 ASSERT(first->IsFlat());
1949 ASSERT(second->IsFlat());
1950 if (is_ascii) {
1951 Object* result = AllocateRawAsciiString(length);
1952 if (result->IsFailure()) return result;
1953 // Copy the characters into the new object.
1954 char* dest = SeqAsciiString::cast(result)->GetChars();
1955 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00001956 const char* src;
1957 if (first->IsExternalString()) {
1958 src = ExternalAsciiString::cast(first)->resource()->data();
1959 } else {
1960 src = SeqAsciiString::cast(first)->GetChars();
1961 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001962 for (int i = 0; i < first_length; i++) *dest++ = src[i];
1963 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00001964 if (second->IsExternalString()) {
1965 src = ExternalAsciiString::cast(second)->resource()->data();
1966 } else {
1967 src = SeqAsciiString::cast(second)->GetChars();
1968 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001969 for (int i = 0; i < second_length; i++) *dest++ = src[i];
1970 return result;
1971 } else {
1972 Object* result = AllocateRawTwoByteString(length);
1973 if (result->IsFailure()) return result;
1974 // Copy the characters into the new object.
1975 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
1976 String::WriteToFlat(first, dest, 0, first_length);
1977 String::WriteToFlat(second, dest + first_length, 0, second_length);
1978 return result;
1979 }
1980 }
1981
Steve Blockd0582a62009-12-15 09:54:21 +00001982 Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00001983
Leon Clarkee46be812010-01-19 14:06:41 +00001984 Object* result = Allocate(map, NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00001985 if (result->IsFailure()) return result;
Steve Blocka7e24c12009-10-30 11:49:00 +00001986 ConsString* cons_string = ConsString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00001987 WriteBarrierMode mode = cons_string->GetWriteBarrierMode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001988 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00001989 cons_string->set_hash_field(String::kEmptyHashField);
1990 cons_string->set_first(first, mode);
1991 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00001992 return result;
1993}
1994
1995
1996Object* Heap::AllocateSubString(String* buffer,
1997 int start,
1998 int end) {
1999 int length = end - start;
2000
2001 if (length == 1) {
2002 return Heap::LookupSingleCharacterStringFromCode(
2003 buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002004 } else if (length == 2) {
2005 // Optimization for 2-byte strings often used as keys in a decompression
2006 // dictionary. Check whether we already have the string in the symbol
2007  // table to prevent creation of many unnecessary strings.
2008 unsigned c1 = buffer->Get(start);
2009 unsigned c2 = buffer->Get(start + 1);
2010 return MakeOrFindTwoCharacterString(c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002011 }
2012
2013 // Make an attempt to flatten the buffer to reduce access time.
2014 if (!buffer->IsFlat()) {
2015 buffer->TryFlatten();
2016 }
2017
2018 Object* result = buffer->IsAsciiRepresentation()
2019 ? AllocateRawAsciiString(length)
2020 : AllocateRawTwoByteString(length);
2021 if (result->IsFailure()) return result;
Steve Blockd0582a62009-12-15 09:54:21 +00002022 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002023
2024 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002025 if (buffer->IsAsciiRepresentation()) {
2026 ASSERT(string_result->IsAsciiRepresentation());
2027 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2028 String::WriteToFlat(buffer, dest, start, end);
2029 } else {
2030 ASSERT(string_result->IsTwoByteRepresentation());
2031 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2032 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002033 }
Steve Blockd0582a62009-12-15 09:54:21 +00002034
Steve Blocka7e24c12009-10-30 11:49:00 +00002035 return result;
2036}
2037
2038
2039Object* Heap::AllocateExternalStringFromAscii(
2040 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002041 size_t length = resource->length();
2042 if (length > static_cast<size_t>(String::kMaxLength)) {
2043 Top::context()->mark_out_of_memory();
2044 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002045 }
2046
Steve Blockd0582a62009-12-15 09:54:21 +00002047 Map* map = external_ascii_string_map();
Leon Clarkee46be812010-01-19 14:06:41 +00002048 Object* result = Allocate(map, NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002049 if (result->IsFailure()) return result;
2050
2051 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002052 external_string->set_length(static_cast<int>(length));
2053 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002054 external_string->set_resource(resource);
2055
2056 return result;
2057}
2058
2059
2060Object* Heap::AllocateExternalStringFromTwoByte(
2061 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002062 size_t length = resource->length();
2063 if (length > static_cast<size_t>(String::kMaxLength)) {
2064 Top::context()->mark_out_of_memory();
2065 return Failure::OutOfMemoryException();
2066 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002067
Steve Blockd0582a62009-12-15 09:54:21 +00002068 Map* map = Heap::external_string_map();
Leon Clarkee46be812010-01-19 14:06:41 +00002069 Object* result = Allocate(map, NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002070 if (result->IsFailure()) return result;
2071
2072 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002073 external_string->set_length(static_cast<int>(length));
2074 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002075 external_string->set_resource(resource);
2076
2077 return result;
2078}
2079
2080
2081Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
2082 if (code <= String::kMaxAsciiCharCode) {
2083 Object* value = Heap::single_character_string_cache()->get(code);
2084 if (value != Heap::undefined_value()) return value;
2085
2086 char buffer[1];
2087 buffer[0] = static_cast<char>(code);
2088 Object* result = LookupSymbol(Vector<const char>(buffer, 1));
2089
2090 if (result->IsFailure()) return result;
2091 Heap::single_character_string_cache()->set(code, result);
2092 return result;
2093 }
2094
2095 Object* result = Heap::AllocateRawTwoByteString(1);
2096 if (result->IsFailure()) return result;
2097 String* answer = String::cast(result);
2098 answer->Set(0, code);
2099 return answer;
2100}
2101
2102
2103Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002104 if (length < 0 || length > ByteArray::kMaxLength) {
2105 return Failure::OutOfMemoryException();
2106 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002107 if (pretenure == NOT_TENURED) {
2108 return AllocateByteArray(length);
2109 }
2110 int size = ByteArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00002111 Object* result = (size <= MaxObjectSizeInPagedSpace())
2112 ? old_data_space_->AllocateRaw(size)
2113 : lo_space_->AllocateRaw(size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002114 if (result->IsFailure()) return result;
2115
2116 reinterpret_cast<Array*>(result)->set_map(byte_array_map());
2117 reinterpret_cast<Array*>(result)->set_length(length);
2118 return result;
2119}
2120
2121
2122Object* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002123 if (length < 0 || length > ByteArray::kMaxLength) {
2124 return Failure::OutOfMemoryException();
2125 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002126 int size = ByteArray::SizeFor(length);
2127 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002128 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00002129 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002130 if (result->IsFailure()) return result;
2131
2132 reinterpret_cast<Array*>(result)->set_map(byte_array_map());
2133 reinterpret_cast<Array*>(result)->set_length(length);
2134 return result;
2135}
2136
2137
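// Overwrites the range [addr, addr + size) with a dummy object so the freed
// memory still parses as a valid heap object during heap iteration. A
// single-word gap uses the dedicated one-pointer filler map, since a ByteArray
// needs at least a map word and a length word; larger gaps are formatted as
// byte arrays whose length is chosen via ByteArray::LengthFor(size) so the
// filler covers exactly size bytes.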
2138void Heap::CreateFillerObjectAt(Address addr, int size) {
2139 if (size == 0) return;
2140 HeapObject* filler = HeapObject::FromAddress(addr);
2141 if (size == kPointerSize) {
2142 filler->set_map(Heap::one_pointer_filler_map());
2143 } else {
2144 filler->set_map(Heap::byte_array_map());
2145 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2146 }
2147}
2148
2149
2150Object* Heap::AllocatePixelArray(int length,
2151 uint8_t* external_pointer,
2152 PretenureFlag pretenure) {
2153 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00002154 Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002155 if (result->IsFailure()) return result;
2156
2157 reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
2158 reinterpret_cast<PixelArray*>(result)->set_length(length);
2159 reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
2160
2161 return result;
2162}
2163
2164
Steve Block3ce2e202009-11-05 08:53:23 +00002165Object* Heap::AllocateExternalArray(int length,
2166 ExternalArrayType array_type,
2167 void* external_pointer,
2168 PretenureFlag pretenure) {
2169 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Steve Block3ce2e202009-11-05 08:53:23 +00002170 Object* result = AllocateRaw(ExternalArray::kAlignedSize,
2171 space,
2172 OLD_DATA_SPACE);
Steve Block3ce2e202009-11-05 08:53:23 +00002173 if (result->IsFailure()) return result;
2174
2175 reinterpret_cast<ExternalArray*>(result)->set_map(
2176 MapForExternalArrayType(array_type));
2177 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2178 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2179 external_pointer);
2180
2181 return result;
2182}
2183
2184
Steve Blocka7e24c12009-10-30 11:49:00 +00002185Object* Heap::CreateCode(const CodeDesc& desc,
2186 ZoneScopeInfo* sinfo,
2187 Code::Flags flags,
2188 Handle<Object> self_reference) {
2189 // Compute size
2190 int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment);
2191 int sinfo_size = 0;
2192 if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
2193 int obj_size = Code::SizeFor(body_size, sinfo_size);
2194 ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
2195 Object* result;
2196 if (obj_size > MaxObjectSizeInPagedSpace()) {
2197 result = lo_space_->AllocateRawCode(obj_size);
2198 } else {
2199 result = code_space_->AllocateRaw(obj_size);
2200 }
2201
2202 if (result->IsFailure()) return result;
2203
2204 // Initialize the object
2205 HeapObject::cast(result)->set_map(code_map());
2206 Code* code = Code::cast(result);
2207 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2208 code->set_instruction_size(desc.instr_size);
2209 code->set_relocation_size(desc.reloc_size);
2210 code->set_sinfo_size(sinfo_size);
2211 code->set_flags(flags);
2212 // Allow self references to created code object by patching the handle to
2213 // point to the newly allocated Code object.
2214 if (!self_reference.is_null()) {
2215 *(self_reference.location()) = code;
2216 }
2217 // Migrate generated code.
2218 // The generated code can contain Object** values (typically from handles)
2219 // that are dereferenced during the copy to point directly to the actual heap
2220 // objects. These pointers can include references to the code object itself,
2221 // through the self_reference parameter.
2222 code->CopyFrom(desc);
2223 if (sinfo != NULL) sinfo->Serialize(code); // write scope info
2224
2225#ifdef DEBUG
2226 code->Verify();
2227#endif
2228 return code;
2229}
2230
2231
2232Object* Heap::CopyCode(Code* code) {
2233 // Allocate an object the same size as the code object.
2234 int obj_size = code->Size();
2235 Object* result;
2236 if (obj_size > MaxObjectSizeInPagedSpace()) {
2237 result = lo_space_->AllocateRawCode(obj_size);
2238 } else {
2239 result = code_space_->AllocateRaw(obj_size);
2240 }
2241
2242 if (result->IsFailure()) return result;
2243
2244 // Copy code object.
2245 Address old_addr = code->address();
2246 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2247 CopyBlock(reinterpret_cast<Object**>(new_addr),
2248 reinterpret_cast<Object**>(old_addr),
2249 obj_size);
2250 // Relocate the copy.
2251 Code* new_code = Code::cast(result);
2252 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2253 new_code->Relocate(new_addr - old_addr);
2254 return new_code;
2255}
2256
2257
2258Object* Heap::Allocate(Map* map, AllocationSpace space) {
2259 ASSERT(gc_state_ == NOT_IN_GC);
2260 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002261 // If allocation failures are disallowed, we may allocate in a different
2262 // space when new space is full and the object is not a large object.
2263 AllocationSpace retry_space =
2264 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
2265 Object* result =
2266 AllocateRaw(map->instance_size(), space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00002267 if (result->IsFailure()) return result;
2268 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002269#ifdef ENABLE_LOGGING_AND_PROFILING
2270 ProducerHeapProfile::RecordJSObjectAllocation(result);
2271#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002272 return result;
2273}
2274
2275
2276Object* Heap::InitializeFunction(JSFunction* function,
2277 SharedFunctionInfo* shared,
2278 Object* prototype) {
2279 ASSERT(!prototype->IsMap());
2280 function->initialize_properties();
2281 function->initialize_elements();
2282 function->set_shared(shared);
2283 function->set_prototype_or_initial_map(prototype);
2284 function->set_context(undefined_value());
2285 function->set_literals(empty_fixed_array(), SKIP_WRITE_BARRIER);
2286 return function;
2287}
2288
2289
2290Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
2291 // Allocate the prototype. Make sure to use the object function
2292 // from the function's context, since the function can be from a
2293 // different context.
2294 JSFunction* object_function =
2295 function->context()->global_context()->object_function();
2296 Object* prototype = AllocateJSObject(object_function);
2297 if (prototype->IsFailure()) return prototype;
2298 // When creating the prototype for the function we must set its
2299 // constructor to the function.
2300 Object* result =
2301 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
2302 function,
2303 DONT_ENUM);
2304 if (result->IsFailure()) return result;
2305 return prototype;
2306}
2307
2308
2309Object* Heap::AllocateFunction(Map* function_map,
2310 SharedFunctionInfo* shared,
Leon Clarkee46be812010-01-19 14:06:41 +00002311 Object* prototype,
2312 PretenureFlag pretenure) {
2313 AllocationSpace space =
2314 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2315 Object* result = Allocate(function_map, space);
Steve Blocka7e24c12009-10-30 11:49:00 +00002316 if (result->IsFailure()) return result;
2317 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2318}
2319
2320
2321Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
2322 // To get fast allocation and map sharing for arguments objects we
2323 // allocate them based on an arguments boilerplate.
2324
2325 // This calls Copy directly rather than using Heap::AllocateRaw so we
2326 // duplicate the check here.
2327 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2328
2329 JSObject* boilerplate =
2330 Top::context()->global_context()->arguments_boilerplate();
2331
Leon Clarkee46be812010-01-19 14:06:41 +00002332 // Check that the size of the boilerplate matches our
2333 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2334 // on the size being a known constant.
2335 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2336
2337 // Do the allocation.
2338 Object* result =
2339 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002340 if (result->IsFailure()) return result;
2341
2342 // Copy the content. The arguments boilerplate doesn't have any
2343 // fields that point to new space so it's safe to skip the write
2344 // barrier here.
2345 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
2346 reinterpret_cast<Object**>(boilerplate->address()),
Leon Clarkee46be812010-01-19 14:06:41 +00002347 kArgumentsObjectSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002348
2349 // Set the two properties.
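  // The callee may be a heap object in new space, so it is stored with the
  // default write barrier; the length is a Smi and needs no barrier, hence
  // SKIP_WRITE_BARRIER.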
2350 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2351 callee);
2352 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2353 Smi::FromInt(length),
2354 SKIP_WRITE_BARRIER);
2355
2356 // Check the state of the object
2357 ASSERT(JSObject::cast(result)->HasFastProperties());
2358 ASSERT(JSObject::cast(result)->HasFastElements());
2359
2360 return result;
2361}
2362
2363
2364Object* Heap::AllocateInitialMap(JSFunction* fun) {
2365 ASSERT(!fun->has_initial_map());
2366
2367 // First create a new map with the size and number of in-object properties
2368 // suggested by the function.
2369 int instance_size = fun->shared()->CalculateInstanceSize();
2370 int in_object_properties = fun->shared()->CalculateInObjectProperties();
2371 Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
2372 if (map_obj->IsFailure()) return map_obj;
2373
2374 // Fetch or allocate prototype.
2375 Object* prototype;
2376 if (fun->has_instance_prototype()) {
2377 prototype = fun->instance_prototype();
2378 } else {
2379 prototype = AllocateFunctionPrototype(fun);
2380 if (prototype->IsFailure()) return prototype;
2381 }
2382 Map* map = Map::cast(map_obj);
2383 map->set_inobject_properties(in_object_properties);
2384 map->set_unused_property_fields(in_object_properties);
2385 map->set_prototype(prototype);
2386
2387  // If the function has only simple this property assignments, add field
2388 // descriptors for these to the initial map as the object cannot be
2389 // constructed without having these properties.
2390 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Steve Blockd0582a62009-12-15 09:54:21 +00002391 if (fun->shared()->has_only_simple_this_property_assignments() &&
2392 fun->shared()->this_property_assignments_count() > 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002393 int count = fun->shared()->this_property_assignments_count();
2394 if (count > in_object_properties) {
2395 count = in_object_properties;
2396 }
2397 Object* descriptors_obj = DescriptorArray::Allocate(count);
2398 if (descriptors_obj->IsFailure()) return descriptors_obj;
2399 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
2400 for (int i = 0; i < count; i++) {
2401 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
2402 ASSERT(name->IsSymbol());
2403 FieldDescriptor field(name, i, NONE);
2404 descriptors->Set(i, &field);
2405 }
2406 descriptors->Sort();
2407 map->set_instance_descriptors(descriptors);
2408 map->set_pre_allocated_property_fields(count);
2409 map->set_unused_property_fields(in_object_properties - count);
2410 }
2411 return map;
2412}
2413
2414
2415void Heap::InitializeJSObjectFromMap(JSObject* obj,
2416 FixedArray* properties,
2417 Map* map) {
2418 obj->set_properties(properties);
2419 obj->initialize_elements();
2420 // TODO(1240798): Initialize the object's body using valid initial values
2421 // according to the object's initial map. For example, if the map's
2422 // instance type is JS_ARRAY_TYPE, the length field should be initialized
2423  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
2424  // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
2425  // verification code has to cope with (temporarily) invalid objects. See,
2426  // for example, JSArray::JSArrayVerify.
2427 obj->InitializeBody(map->instance_size());
2428}
2429
2430
2431Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
2432 // JSFunctions should be allocated using AllocateFunction to be
2433 // properly initialized.
2434 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
2435
2436  // Both types of global objects should be allocated using
2437  // AllocateGlobalObject to be properly initialized.
2438 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
2439 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
2440
2441 // Allocate the backing storage for the properties.
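  // The backing store gets one slot for each property field the map accounts
  // for (pre-allocated plus currently unused fields) that does not fit inside
  // the object itself (inobject_properties()).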
2442 int prop_size =
2443 map->pre_allocated_property_fields() +
2444 map->unused_property_fields() -
2445 map->inobject_properties();
2446 ASSERT(prop_size >= 0);
2447 Object* properties = AllocateFixedArray(prop_size, pretenure);
2448 if (properties->IsFailure()) return properties;
2449
2450 // Allocate the JSObject.
2451 AllocationSpace space =
2452 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2453 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
2454 Object* obj = Allocate(map, space);
2455 if (obj->IsFailure()) return obj;
2456
2457 // Initialize the JSObject.
2458 InitializeJSObjectFromMap(JSObject::cast(obj),
2459 FixedArray::cast(properties),
2460 map);
2461 return obj;
2462}
2463
2464
2465Object* Heap::AllocateJSObject(JSFunction* constructor,
2466 PretenureFlag pretenure) {
2467 // Allocate the initial map if absent.
2468 if (!constructor->has_initial_map()) {
2469 Object* initial_map = AllocateInitialMap(constructor);
2470 if (initial_map->IsFailure()) return initial_map;
2471 constructor->set_initial_map(Map::cast(initial_map));
2472 Map::cast(initial_map)->set_constructor(constructor);
2473 }
2474  // Allocate the object based on the constructor's initial map.
2475 Object* result =
2476 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
2477 // Make sure result is NOT a global object if valid.
2478 ASSERT(result->IsFailure() || !result->IsGlobalObject());
2479 return result;
2480}
2481
2482
2483Object* Heap::AllocateGlobalObject(JSFunction* constructor) {
2484 ASSERT(constructor->has_initial_map());
2485 Map* map = constructor->initial_map();
2486
2487 // Make sure no field properties are described in the initial map.
2488  // This guarantees that normalizing the properties does not
2489 // require us to change property values to JSGlobalPropertyCells.
2490 ASSERT(map->NextFreePropertyIndex() == 0);
2491
2492 // Make sure we don't have a ton of pre-allocated slots in the
2493 // global objects. They will be unused once we normalize the object.
2494 ASSERT(map->unused_property_fields() == 0);
2495 ASSERT(map->inobject_properties() == 0);
2496
2497 // Initial size of the backing store to avoid resize of the storage during
2498  // bootstrapping. The size differs between the JS global object and the
2499 // builtins object.
2500 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
2501
2502 // Allocate a dictionary object for backing storage.
2503 Object* obj =
2504 StringDictionary::Allocate(
2505 map->NumberOfDescribedProperties() * 2 + initial_size);
2506 if (obj->IsFailure()) return obj;
2507 StringDictionary* dictionary = StringDictionary::cast(obj);
2508
2509 // The global object might be created from an object template with accessors.
2510 // Fill these accessors into the dictionary.
2511 DescriptorArray* descs = map->instance_descriptors();
2512 for (int i = 0; i < descs->number_of_descriptors(); i++) {
2513 PropertyDetails details = descs->GetDetails(i);
2514 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
2515 PropertyDetails d =
2516 PropertyDetails(details.attributes(), CALLBACKS, details.index());
2517 Object* value = descs->GetCallbacksObject(i);
2518 value = Heap::AllocateJSGlobalPropertyCell(value);
2519 if (value->IsFailure()) return value;
2520
2521 Object* result = dictionary->Add(descs->GetKey(i), value, d);
2522 if (result->IsFailure()) return result;
2523 dictionary = StringDictionary::cast(result);
2524 }
2525
2526 // Allocate the global object and initialize it with the backing store.
2527 obj = Allocate(map, OLD_POINTER_SPACE);
2528 if (obj->IsFailure()) return obj;
2529 JSObject* global = JSObject::cast(obj);
2530 InitializeJSObjectFromMap(global, dictionary, map);
2531
2532 // Create a new map for the global object.
2533 obj = map->CopyDropDescriptors();
2534 if (obj->IsFailure()) return obj;
2535 Map* new_map = Map::cast(obj);
2536
2537 // Setup the global object as a normalized object.
2538  // Set up the global object as a normalized object.
2539 global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
2540 global->set_properties(dictionary);
2541
2542 // Make sure result is a global object with properties in dictionary.
2543 ASSERT(global->IsGlobalObject());
2544 ASSERT(!global->HasFastProperties());
2545 return global;
2546}
2547
2548
2549Object* Heap::CopyJSObject(JSObject* source) {
2550 // Never used to copy functions. If functions need to be copied we
2551 // have to be careful to clear the literals array.
2552 ASSERT(!source->IsJSFunction());
2553
2554 // Make the clone.
2555 Map* map = source->map();
2556 int object_size = map->instance_size();
2557 Object* clone;
2558
2559 // If we're forced to always allocate, we use the general allocation
2560 // functions which may leave us with an object in old space.
2561 if (always_allocate()) {
2562 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
2563 if (clone->IsFailure()) return clone;
2564 Address clone_address = HeapObject::cast(clone)->address();
2565 CopyBlock(reinterpret_cast<Object**>(clone_address),
2566 reinterpret_cast<Object**>(source->address()),
2567 object_size);
2568 // Update write barrier for all fields that lie beyond the header.
2569 for (int offset = JSObject::kHeaderSize;
2570 offset < object_size;
2571 offset += kPointerSize) {
2572 RecordWrite(clone_address, offset);
2573 }
2574 } else {
2575 clone = new_space_.AllocateRaw(object_size);
2576 if (clone->IsFailure()) return clone;
2577 ASSERT(Heap::InNewSpace(clone));
2578 // Since we know the clone is allocated in new space, we can copy
2579 // the contents without worrying about updating the write barrier.
2580 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
2581 reinterpret_cast<Object**>(source->address()),
2582 object_size);
2583 }
2584
2585 FixedArray* elements = FixedArray::cast(source->elements());
2586 FixedArray* properties = FixedArray::cast(source->properties());
2587 // Update elements if necessary.
2588  if (elements->length() > 0) {
2589 Object* elem = CopyFixedArray(elements);
2590 if (elem->IsFailure()) return elem;
2591 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
2592 }
2593 // Update properties if necessary.
2594 if (properties->length() > 0) {
2595 Object* prop = CopyFixedArray(properties);
2596 if (prop->IsFailure()) return prop;
2597 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
2598 }
2599 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00002600#ifdef ENABLE_LOGGING_AND_PROFILING
2601 ProducerHeapProfile::RecordJSObjectAllocation(clone);
2602#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002603 return clone;
2604}
2605
2606
2607Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
2608 JSGlobalProxy* object) {
2609 // Allocate initial map if absent.
2610 if (!constructor->has_initial_map()) {
2611 Object* initial_map = AllocateInitialMap(constructor);
2612 if (initial_map->IsFailure()) return initial_map;
2613 constructor->set_initial_map(Map::cast(initial_map));
2614 Map::cast(initial_map)->set_constructor(constructor);
2615 }
2616
2617 Map* map = constructor->initial_map();
2618
2619 // Check that the already allocated object has the same size as
2620 // objects allocated using the constructor.
2621 ASSERT(map->instance_size() == object->map()->instance_size());
2622
2623 // Allocate the backing storage for the properties.
2624 int prop_size = map->unused_property_fields() - map->inobject_properties();
2625 Object* properties = AllocateFixedArray(prop_size, TENURED);
2626 if (properties->IsFailure()) return properties;
2627
2628 // Reset the map for the object.
2629 object->set_map(constructor->initial_map());
2630
2631 // Reinitialize the object from the constructor map.
2632 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
2633 return object;
2634}
2635
2636
2637Object* Heap::AllocateStringFromAscii(Vector<const char> string,
2638 PretenureFlag pretenure) {
2639 Object* result = AllocateRawAsciiString(string.length(), pretenure);
2640 if (result->IsFailure()) return result;
2641
2642 // Copy the characters into the new object.
2643 SeqAsciiString* string_result = SeqAsciiString::cast(result);
2644 for (int i = 0; i < string.length(); i++) {
2645 string_result->SeqAsciiStringSet(i, string[i]);
2646 }
2647 return result;
2648}
2649
2650
2651Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
2652 PretenureFlag pretenure) {
2653 // Count the number of characters in the UTF-8 string and check if
2654 // it is an ASCII string.
2655 Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
2656 decoder->Reset(string.start(), string.length());
2657 int chars = 0;
2658 bool is_ascii = true;
2659 while (decoder->has_more()) {
2660 uc32 r = decoder->GetNext();
2661 if (r > String::kMaxAsciiCharCode) is_ascii = false;
2662 chars++;
2663 }
2664
2665 // If the string is ascii, we do not need to convert the characters
2666 // since UTF8 is backwards compatible with ascii.
2667 if (is_ascii) return AllocateStringFromAscii(string, pretenure);
2668
2669 Object* result = AllocateRawTwoByteString(chars, pretenure);
2670 if (result->IsFailure()) return result;
2671
2672 // Convert and copy the characters into the new object.
2673 String* string_result = String::cast(result);
2674 decoder->Reset(string.start(), string.length());
2675 for (int i = 0; i < chars; i++) {
2676 uc32 r = decoder->GetNext();
2677 string_result->Set(i, r);
2678 }
2679 return result;
2680}
2681
2682
2683Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
2684 PretenureFlag pretenure) {
2685 // Check if the string is an ASCII string.
2686 int i = 0;
2687 while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
2688
2689 Object* result;
2690 if (i == string.length()) { // It's an ASCII string.
2691 result = AllocateRawAsciiString(string.length(), pretenure);
2692 } else { // It's not an ASCII string.
2693 result = AllocateRawTwoByteString(string.length(), pretenure);
2694 }
2695 if (result->IsFailure()) return result;
2696
2697 // Copy the characters into the new object, which may be either ASCII or
2698 // UTF-16.
2699 String* string_result = String::cast(result);
2700 for (int i = 0; i < string.length(); i++) {
2701 string_result->Set(i, string[i]);
2702 }
2703 return result;
2704}
2705
2706
2707Map* Heap::SymbolMapForString(String* string) {
2708 // If the string is in new space it cannot be used as a symbol.
2709 if (InNewSpace(string)) return NULL;
2710
2711 // Find the corresponding symbol map for strings.
2712 Map* map = string->map();
Steve Blockd0582a62009-12-15 09:54:21 +00002713 if (map == ascii_string_map()) return ascii_symbol_map();
2714 if (map == string_map()) return symbol_map();
2715 if (map == cons_string_map()) return cons_symbol_map();
2716 if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
2717 if (map == external_string_map()) return external_symbol_map();
2718 if (map == external_ascii_string_map()) return external_ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002719
2720 // No match found.
2721 return NULL;
2722}
2723
2724
2725Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
2726 int chars,
Steve Blockd0582a62009-12-15 09:54:21 +00002727 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00002728 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002729 // Ensure the chars matches the number of characters in the buffer.
2730 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
2731 // Determine whether the string is ascii.
2732 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00002733 while (buffer->has_more()) {
2734 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
2735 is_ascii = false;
2736 break;
2737 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002738 }
2739 buffer->Rewind();
2740
2741 // Compute map and object size.
2742 int size;
2743 Map* map;
2744
2745 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00002746 if (chars > SeqAsciiString::kMaxLength) {
2747 return Failure::OutOfMemoryException();
2748 }
Steve Blockd0582a62009-12-15 09:54:21 +00002749 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002750 size = SeqAsciiString::SizeFor(chars);
2751 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002752 if (chars > SeqTwoByteString::kMaxLength) {
2753 return Failure::OutOfMemoryException();
2754 }
Steve Blockd0582a62009-12-15 09:54:21 +00002755 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002756 size = SeqTwoByteString::SizeFor(chars);
2757 }
2758
2759 // Allocate string.
Leon Clarkee46be812010-01-19 14:06:41 +00002760 Object* result = (size > MaxObjectSizeInPagedSpace())
2761 ? lo_space_->AllocateRaw(size)
2762 : old_data_space_->AllocateRaw(size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002763 if (result->IsFailure()) return result;
2764
2765 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00002766 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00002767 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002768 answer->set_length(chars);
2769 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00002770
2771 ASSERT_EQ(size, answer->Size());
2772
2773 // Fill in the characters.
2774 for (int i = 0; i < chars; i++) {
2775 answer->Set(i, buffer->GetNext());
2776 }
2777 return answer;
2778}
2779
2780
2781Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002782 if (length < 0 || length > SeqAsciiString::kMaxLength) {
2783 return Failure::OutOfMemoryException();
2784 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002785
2786 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00002787 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002788
Leon Clarkee46be812010-01-19 14:06:41 +00002789 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2790 AllocationSpace retry_space = OLD_DATA_SPACE;
2791
Steve Blocka7e24c12009-10-30 11:49:00 +00002792 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00002793 if (size > kMaxObjectSizeInNewSpace) {
2794 // Allocate in large object space, retry space will be ignored.
2795 space = LO_SPACE;
2796 } else if (size > MaxObjectSizeInPagedSpace()) {
2797 // Allocate in new space, retry in large object space.
2798 retry_space = LO_SPACE;
2799 }
2800 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
2801 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00002802 }
Leon Clarkee46be812010-01-19 14:06:41 +00002803 Object* result = AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00002804 if (result->IsFailure()) return result;
2805
Steve Blocka7e24c12009-10-30 11:49:00 +00002806 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00002807 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002808 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002809 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002810 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2811 return result;
2812}
2813
2814
2815Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002816 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
2817 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002818 }
Leon Clarkee46be812010-01-19 14:06:41 +00002819 int size = SeqTwoByteString::SizeFor(length);
2820 ASSERT(size <= SeqTwoByteString::kMaxSize);
2821 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2822 AllocationSpace retry_space = OLD_DATA_SPACE;
2823
2824 if (space == NEW_SPACE) {
2825 if (size > kMaxObjectSizeInNewSpace) {
2826 // Allocate in large object space, retry space will be ignored.
2827 space = LO_SPACE;
2828 } else if (size > MaxObjectSizeInPagedSpace()) {
2829 // Allocate in new space, retry in large object space.
2830 retry_space = LO_SPACE;
2831 }
2832 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
2833 space = LO_SPACE;
2834 }
2835 Object* result = AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00002836 if (result->IsFailure()) return result;
2837
Steve Blocka7e24c12009-10-30 11:49:00 +00002838 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00002839 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002840 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002841 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002842 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2843 return result;
2844}
2845
2846
2847Object* Heap::AllocateEmptyFixedArray() {
2848 int size = FixedArray::SizeFor(0);
2849 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
2850 if (result->IsFailure()) return result;
2851 // Initialize the object.
2852 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2853 reinterpret_cast<Array*>(result)->set_length(0);
2854 return result;
2855}
2856
2857
2858Object* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002859 if (length < 0 || length > FixedArray::kMaxLength) {
2860 return Failure::OutOfMemoryException();
2861 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002862 // Use the general function if we're forced to always allocate.
2863 if (always_allocate()) return AllocateFixedArray(length, TENURED);
2864 // Allocate the raw data for a fixed array.
2865 int size = FixedArray::SizeFor(length);
2866 return size <= kMaxObjectSizeInNewSpace
2867 ? new_space_.AllocateRaw(size)
2868 : lo_space_->AllocateRawFixedArray(size);
2869}
2870
2871
2872Object* Heap::CopyFixedArray(FixedArray* src) {
2873 int len = src->length();
2874 Object* obj = AllocateRawFixedArray(len);
2875 if (obj->IsFailure()) return obj;
2876 if (Heap::InNewSpace(obj)) {
2877 HeapObject* dst = HeapObject::cast(obj);
2878 CopyBlock(reinterpret_cast<Object**>(dst->address()),
2879 reinterpret_cast<Object**>(src->address()),
2880 FixedArray::SizeFor(len));
2881 return obj;
2882 }
2883 HeapObject::cast(obj)->set_map(src->map());
2884 FixedArray* result = FixedArray::cast(obj);
2885 result->set_length(len);
2886 // Copy the content
2887 WriteBarrierMode mode = result->GetWriteBarrierMode();
2888 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
2889 return result;
2890}
2891
2892
2893Object* Heap::AllocateFixedArray(int length) {
2894 ASSERT(length >= 0);
2895 if (length == 0) return empty_fixed_array();
2896 Object* result = AllocateRawFixedArray(length);
2897 if (!result->IsFailure()) {
2898 // Initialize header.
2899 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2900 FixedArray* array = FixedArray::cast(result);
2901 array->set_length(length);
2902 Object* value = undefined_value();
2903 // Initialize body.
2904 for (int index = 0; index < length; index++) {
2905 array->set(index, value, SKIP_WRITE_BARRIER);
2906 }
2907 }
2908 return result;
2909}
2910
2911
2912Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002913 ASSERT(length >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002914 ASSERT(empty_fixed_array()->IsFixedArray());
Leon Clarkee46be812010-01-19 14:06:41 +00002915 if (length < 0 || length > FixedArray::kMaxLength) {
2916 return Failure::OutOfMemoryException();
2917 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002918 if (length == 0) return empty_fixed_array();
2919
Leon Clarkee46be812010-01-19 14:06:41 +00002920 AllocationSpace space =
2921 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00002922 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00002923 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
2924 // Too big for new space.
2925 space = LO_SPACE;
2926 } else if (space == OLD_POINTER_SPACE &&
2927 size > MaxObjectSizeInPagedSpace()) {
2928 // Too big for old pointer space.
2929 space = LO_SPACE;
2930 }
2931
2932 // Specialize allocation for the space.
Steve Blocka7e24c12009-10-30 11:49:00 +00002933 Object* result = Failure::OutOfMemoryException();
Leon Clarkee46be812010-01-19 14:06:41 +00002934 if (space == NEW_SPACE) {
2935 // We cannot use Heap::AllocateRaw() because it will not properly
2936 // allocate extra remembered set bits if always_allocate() is true and
2937 // new space allocation fails.
2938 result = new_space_.AllocateRaw(size);
2939 if (result->IsFailure() && always_allocate()) {
2940 if (size <= MaxObjectSizeInPagedSpace()) {
2941 result = old_pointer_space_->AllocateRaw(size);
2942 } else {
2943 result = lo_space_->AllocateRawFixedArray(size);
2944 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002945 }
Leon Clarkee46be812010-01-19 14:06:41 +00002946 } else if (space == OLD_POINTER_SPACE) {
2947 result = old_pointer_space_->AllocateRaw(size);
2948 } else {
2949 ASSERT(space == LO_SPACE);
2950 result = lo_space_->AllocateRawFixedArray(size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002951 }
Leon Clarkee46be812010-01-19 14:06:41 +00002952 if (result->IsFailure()) return result;
2953
Steve Blocka7e24c12009-10-30 11:49:00 +00002954 // Initialize the object.
2955 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2956 FixedArray* array = FixedArray::cast(result);
2957 array->set_length(length);
2958 Object* value = undefined_value();
2959 for (int index = 0; index < length; index++) {
2960 array->set(index, value, SKIP_WRITE_BARRIER);
2961 }
2962 return array;
2963}
2964
2965
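// Like AllocateFixedArray, but the elements are initialized with the hole
// value instead of undefined.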
2966Object* Heap::AllocateFixedArrayWithHoles(int length) {
2967 if (length == 0) return empty_fixed_array();
2968 Object* result = AllocateRawFixedArray(length);
2969 if (!result->IsFailure()) {
2970 // Initialize header.
2971 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2972 FixedArray* array = FixedArray::cast(result);
2973 array->set_length(length);
2974 // Initialize body.
2975 Object* value = the_hole_value();
2976 for (int index = 0; index < length; index++) {
2977 array->set(index, value, SKIP_WRITE_BARRIER);
2978 }
2979 }
2980 return result;
2981}
2982
2983
2984Object* Heap::AllocateHashTable(int length) {
2985 Object* result = Heap::AllocateFixedArray(length);
2986 if (result->IsFailure()) return result;
2987 reinterpret_cast<Array*>(result)->set_map(hash_table_map());
2988 ASSERT(result->IsHashTable());
2989 return result;
2990}
2991
2992
2993Object* Heap::AllocateGlobalContext() {
2994 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
2995 if (result->IsFailure()) return result;
2996 Context* context = reinterpret_cast<Context*>(result);
2997 context->set_map(global_context_map());
2998 ASSERT(context->IsGlobalContext());
2999 ASSERT(result->IsContext());
3000 return result;
3001}
3002
3003
3004Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
3005 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
3006 Object* result = Heap::AllocateFixedArray(length);
3007 if (result->IsFailure()) return result;
3008 Context* context = reinterpret_cast<Context*>(result);
3009 context->set_map(context_map());
3010 context->set_closure(function);
3011 context->set_fcontext(context);
3012 context->set_previous(NULL);
3013 context->set_extension(NULL);
3014 context->set_global(function->context()->global());
3015 ASSERT(!context->IsGlobalContext());
3016 ASSERT(context->is_function_context());
3017 ASSERT(result->IsContext());
3018 return result;
3019}
3020
3021
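// Allocate a 'with' context (or a 'catch' context when is_catch_context is
// set) that is chained to the given previous context and carries the given
// extension object.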
3022Object* Heap::AllocateWithContext(Context* previous,
3023 JSObject* extension,
3024 bool is_catch_context) {
3025 Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3026 if (result->IsFailure()) return result;
3027 Context* context = reinterpret_cast<Context*>(result);
3028 context->set_map(is_catch_context ? catch_context_map() : context_map());
3029 context->set_closure(previous->closure());
3030 context->set_fcontext(previous->fcontext());
3031 context->set_previous(previous);
3032 context->set_extension(extension);
3033 context->set_global(previous->global());
3034 ASSERT(!context->IsGlobalContext());
3035 ASSERT(!context->is_function_context());
3036 ASSERT(result->IsContext());
3037 return result;
3038}
3039
3040
3041Object* Heap::AllocateStruct(InstanceType type) {
3042 Map* map;
3043 switch (type) {
3044#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3045STRUCT_LIST(MAKE_CASE)
3046#undef MAKE_CASE
3047 default:
3048 UNREACHABLE();
3049 return Failure::InternalError();
3050 }
3051 int size = map->instance_size();
3052 AllocationSpace space =
3053 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
3054 Object* result = Heap::Allocate(map, space);
3055 if (result->IsFailure()) return result;
3056 Struct::cast(result)->InitializeBody(size);
3057 return result;
3058}
3059
3060
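// Called by the embedder when it is idle. Repeated notifications without an
// intervening GC escalate the response: first a new-space scavenge, then a
// full mark-sweep (after clearing the compilation cache), and finally a
// compacting collection, after which true is returned to signal completion.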
3061bool Heap::IdleNotification() {
3062 static const int kIdlesBeforeScavenge = 4;
3063 static const int kIdlesBeforeMarkSweep = 7;
3064 static const int kIdlesBeforeMarkCompact = 8;
3065 static int number_idle_notifications = 0;
3066 static int last_gc_count = gc_count_;
3067
3068 bool finished = false;
3069
3070 if (last_gc_count == gc_count_) {
3071 number_idle_notifications++;
3072 } else {
3073 number_idle_notifications = 0;
3074 last_gc_count = gc_count_;
3075 }
3076
3077 if (number_idle_notifications == kIdlesBeforeScavenge) {
3078 CollectGarbage(0, NEW_SPACE);
3079 new_space_.Shrink();
3080 last_gc_count = gc_count_;
3081
3082 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003083 // Before doing the mark-sweep collections we clear the
3084 // compilation cache to avoid hanging on to source code and
3085 // generated code for cached functions.
3086 CompilationCache::Clear();
3087
Steve Blocka7e24c12009-10-30 11:49:00 +00003088 CollectAllGarbage(false);
3089 new_space_.Shrink();
3090 last_gc_count = gc_count_;
3091
3092 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3093 CollectAllGarbage(true);
3094 new_space_.Shrink();
3095 last_gc_count = gc_count_;
3096 number_idle_notifications = 0;
3097 finished = true;
3098 }
3099
3100 // Uncommit unused memory in new space.
3101 Heap::UncommitFromSpace();
3102 return finished;
3103}
3104
3105
3106#ifdef DEBUG
3107
3108void Heap::Print() {
3109 if (!HasBeenSetup()) return;
3110 Top::PrintStack();
3111 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003112 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3113 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003114}
3115
3116
3117void Heap::ReportCodeStatistics(const char* title) {
3118 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3119 PagedSpace::ResetCodeStatistics();
3120 // We do not look for code in new space, map space, or old space. If code
3121 // somehow ends up in those spaces, we would miss it here.
3122 code_space_->CollectCodeStatistics();
3123 lo_space_->CollectCodeStatistics();
3124 PagedSpace::ReportCodeStatistics();
3125}
3126
3127
3128// This function expects that NewSpace's allocated objects histogram is
3129// populated (via a call to CollectStatistics or else as a side effect of a
3130// just-completed scavenge collection).
3131void Heap::ReportHeapStatistics(const char* title) {
3132 USE(title);
3133 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3134 title, gc_count_);
3135 PrintF("mark-compact GC : %d\n", mc_count_);
3136 PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
3137 PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
3138
3139 PrintF("\n");
3140 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3141 GlobalHandles::PrintStats();
3142 PrintF("\n");
3143
3144 PrintF("Heap statistics : ");
3145 MemoryAllocator::ReportStatistics();
3146 PrintF("To space : ");
3147 new_space_.ReportStatistics();
3148 PrintF("Old pointer space : ");
3149 old_pointer_space_->ReportStatistics();
3150 PrintF("Old data space : ");
3151 old_data_space_->ReportStatistics();
3152 PrintF("Code space : ");
3153 code_space_->ReportStatistics();
3154 PrintF("Map space : ");
3155 map_space_->ReportStatistics();
3156 PrintF("Cell space : ");
3157 cell_space_->ReportStatistics();
3158 PrintF("Large object space : ");
3159 lo_space_->ReportStatistics();
3160 PrintF(">>>>>> ========================================= >>>>>>\n");
3161}
3162
3163#endif // DEBUG
3164
3165bool Heap::Contains(HeapObject* value) {
3166 return Contains(value->address());
3167}
3168
3169
3170bool Heap::Contains(Address addr) {
3171 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3172 return HasBeenSetup() &&
3173 (new_space_.ToSpaceContains(addr) ||
3174 old_pointer_space_->Contains(addr) ||
3175 old_data_space_->Contains(addr) ||
3176 code_space_->Contains(addr) ||
3177 map_space_->Contains(addr) ||
3178 cell_space_->Contains(addr) ||
3179 lo_space_->SlowContains(addr));
3180}
3181
3182
3183bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3184 return InSpace(value->address(), space);
3185}
3186
3187
3188bool Heap::InSpace(Address addr, AllocationSpace space) {
3189 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3190 if (!HasBeenSetup()) return false;
3191
3192 switch (space) {
3193 case NEW_SPACE:
3194 return new_space_.ToSpaceContains(addr);
3195 case OLD_POINTER_SPACE:
3196 return old_pointer_space_->Contains(addr);
3197 case OLD_DATA_SPACE:
3198 return old_data_space_->Contains(addr);
3199 case CODE_SPACE:
3200 return code_space_->Contains(addr);
3201 case MAP_SPACE:
3202 return map_space_->Contains(addr);
3203 case CELL_SPACE:
3204 return cell_space_->Contains(addr);
3205 case LO_SPACE:
3206 return lo_space_->SlowContains(addr);
3207 }
3208
3209 return false;
3210}
3211
3212
3213#ifdef DEBUG
3214void Heap::Verify() {
3215 ASSERT(HasBeenSetup());
3216
3217 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00003218 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00003219
3220 new_space_.Verify();
3221
3222 VerifyPointersAndRSetVisitor rset_visitor;
3223 old_pointer_space_->Verify(&rset_visitor);
3224 map_space_->Verify(&rset_visitor);
3225
3226 VerifyPointersVisitor no_rset_visitor;
3227 old_data_space_->Verify(&no_rset_visitor);
3228 code_space_->Verify(&no_rset_visitor);
3229 cell_space_->Verify(&no_rset_visitor);
3230
3231 lo_space_->Verify();
3232}
3233#endif // DEBUG
3234
3235
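// Intern a string in the symbol table. LookupSymbol may allocate a bigger
// table, so the returned table is written back into the root list before
// the symbol is returned.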
3236Object* Heap::LookupSymbol(Vector<const char> string) {
3237 Object* symbol = NULL;
3238 Object* new_table = symbol_table()->LookupSymbol(string, &symbol);
3239 if (new_table->IsFailure()) return new_table;
3240 // Can't use set_symbol_table because SymbolTable::cast knows that
3241 // SymbolTable is a singleton and checks for identity.
3242 roots_[kSymbolTableRootIndex] = new_table;
3243 ASSERT(symbol != NULL);
3244 return symbol;
3245}
3246
3247
3248Object* Heap::LookupSymbol(String* string) {
3249 if (string->IsSymbol()) return string;
3250 Object* symbol = NULL;
3251 Object* new_table = symbol_table()->LookupString(string, &symbol);
3252 if (new_table->IsFailure()) return new_table;
3253 // Can't use set_symbol_table because SymbolTable::cast knows that
3254 // SymbolTable is a singleton and checks for identity.
3255 roots_[kSymbolTableRootIndex] = new_table;
3256 ASSERT(symbol != NULL);
3257 return symbol;
3258}
3259
3260
3261bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
3262 if (string->IsSymbol()) {
3263 *symbol = string;
3264 return true;
3265 }
3266 return symbol_table()->LookupSymbolIfExists(string, symbol);
3267}
3268
3269
3270#ifdef DEBUG
3271void Heap::ZapFromSpace() {
3272 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
3273 for (Address a = new_space_.FromSpaceLow();
3274 a < new_space_.FromSpaceHigh();
3275 a += kPointerSize) {
3276 Memory::Address_at(a) = kFromSpaceZapValue;
3277 }
3278}
3279#endif // DEBUG
3280
3281
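// Walk the remembered set words that cover [object_start, object_end). Each
// set bit corresponds to one pointer-sized slot; the slot is passed to
// copy_object_func and the bit is cleared again if the slot no longer points
// into new space. The number of set bits encountered is returned.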
3282int Heap::IterateRSetRange(Address object_start,
3283 Address object_end,
3284 Address rset_start,
3285 ObjectSlotCallback copy_object_func) {
3286 Address object_address = object_start;
3287 Address rset_address = rset_start;
3288 int set_bits_count = 0;
3289
3290 // Loop over all the pointers in [object_start, object_end).
3291 while (object_address < object_end) {
3292 uint32_t rset_word = Memory::uint32_at(rset_address);
3293 if (rset_word != 0) {
3294 uint32_t result_rset = rset_word;
3295 for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
3296 // Do not dereference pointers at or past object_end.
3297 if ((rset_word & bitmask) != 0 && object_address < object_end) {
3298 Object** object_p = reinterpret_cast<Object**>(object_address);
3299 if (Heap::InNewSpace(*object_p)) {
3300 copy_object_func(reinterpret_cast<HeapObject**>(object_p));
3301 }
3302 // If this pointer does not need to be remembered anymore, clear
3303 // the remembered set bit.
3304 if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
3305 set_bits_count++;
3306 }
3307 object_address += kPointerSize;
3308 }
3309 // Update the remembered set if it has changed.
3310 if (result_rset != rset_word) {
3311 Memory::uint32_at(rset_address) = result_rset;
3312 }
3313 } else {
3314 // No bits in the word were set. This is the common case.
3315 object_address += kPointerSize * kBitsPerInt;
3316 }
3317 rset_address += kIntSize;
3318 }
3319 return set_bits_count;
3320}
3321
3322
3323void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
3324 ASSERT(Page::is_rset_in_use());
3325 ASSERT(space == old_pointer_space_ || space == map_space_);
3326
3327 static void* paged_rset_histogram = StatsTable::CreateHistogram(
3328 "V8.RSetPaged",
3329 0,
3330 Page::kObjectAreaSize / kPointerSize,
3331 30);
3332
3333 PageIterator it(space, PageIterator::PAGES_IN_USE);
3334 while (it.has_next()) {
3335 Page* page = it.next();
3336 int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
3337 page->RSetStart(), copy_object_func);
3338 if (paged_rset_histogram != NULL) {
3339 StatsTable::AddHistogramSample(paged_rset_histogram, count);
3340 }
3341 }
3342}
3343
3344
Steve Blockd0582a62009-12-15 09:54:21 +00003345void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
3346 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00003347 IterateWeakRoots(v, mode);
3348}
3349
3350
3351void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003352 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00003353 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00003354 if (mode != VISIT_ALL_IN_SCAVENGE) {
3355 // Scavenge collections have special processing for this.
3356 ExternalStringTable::Iterate(v);
3357 }
3358 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00003359}
3360
3361
Steve Blockd0582a62009-12-15 09:54:21 +00003362void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003363 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00003364 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00003365
3366 v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00003367 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00003368
3369 Bootstrapper::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00003370 v->Synchronize("bootstrapper");
Steve Blocka7e24c12009-10-30 11:49:00 +00003371 Top::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00003372 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00003373 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00003374 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00003375
3376#ifdef ENABLE_DEBUGGER_SUPPORT
3377 Debug::Iterate(v);
3378#endif
Steve Blockd0582a62009-12-15 09:54:21 +00003379 v->Synchronize("debug");
Steve Blocka7e24c12009-10-30 11:49:00 +00003380 CompilationCache::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00003381 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00003382
3383 // Iterate over local handles in handle scopes.
3384 HandleScopeImplementer::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00003385 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00003386
Leon Clarkee46be812010-01-19 14:06:41 +00003387 // Iterate over the builtin code objects and code stubs in the
3388 // heap. Note that it is not necessary to iterate over code objects
3389 // on scavenge collections.
3390 if (mode != VISIT_ALL_IN_SCAVENGE) {
3391 Builtins::IterateBuiltins(v);
3392 }
Steve Blockd0582a62009-12-15 09:54:21 +00003393 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00003394
3395 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00003396 if (mode == VISIT_ONLY_STRONG) {
3397 GlobalHandles::IterateStrongRoots(v);
3398 } else {
3399 GlobalHandles::IterateAllRoots(v);
3400 }
3401 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00003402
3403 // Iterate over pointers being held by inactive threads.
3404 ThreadManager::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00003405 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00003406
3407 // Iterate over the pointers the Serialization/Deserialization code is
3408 // holding.
3409 // During garbage collection this keeps the partial snapshot cache alive.
3410 // During deserialization of the startup snapshot this creates the partial
3411 // snapshot cache and deserializes the objects it refers to. During
3412 // serialization this does nothing, since the partial snapshot cache is
3413 // empty. However the next thing we do is create the partial snapshot,
3414 // filling up the partial snapshot cache with objects it needs as we go.
3415 SerializerDeserializer::Iterate(v);
3416 // We don't do a v->Synchronize call here, because in debug mode that will
3417 // output a flag to the snapshot. However at this point the serializer and
3418 // deserializer are deliberately a little unsynchronized (see above) so the
3419 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00003420}
Steve Blocka7e24c12009-10-30 11:49:00 +00003421
3422
3423// Flag is set when the heap has been configured. The heap can be repeatedly
3424// configured through the API until it is set up.
3425static bool heap_configured = false;
3426
3427// TODO(1236194): Since the heap size is configurable on the command line
3428// and through the API, we should gracefully handle the case that the heap
3429// size is not big enough to fit all the initial objects.
Steve Block3ce2e202009-11-05 08:53:23 +00003430bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003431 if (HasBeenSetup()) return false;
3432
Steve Block3ce2e202009-11-05 08:53:23 +00003433 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
3434
3435 if (Snapshot::IsEnabled()) {
3436 // If we are using a snapshot we always reserve the default amount
3437 // of memory for each semispace because code in the snapshot has
3438 // write-barrier code that relies on the size and alignment of new
3439 // space. We therefore cannot use a larger max semispace size
3440 // than the default reserved semispace size.
3441 if (max_semispace_size_ > reserved_semispace_size_) {
3442 max_semispace_size_ = reserved_semispace_size_;
3443 }
3444 } else {
3445 // If we are not using snapshots we reserve space for the actual
3446 // max semispace size.
3447 reserved_semispace_size_ = max_semispace_size_;
3448 }
3449
3450 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00003451
3452 // The new space size must be a power of two to support single-bit testing
3453 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00003454 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
3455 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
3456 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
3457 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003458
3459 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00003460 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003461
3462 heap_configured = true;
3463 return true;
3464}
3465
3466
3467bool Heap::ConfigureHeapDefault() {
Steve Block3ce2e202009-11-05 08:53:23 +00003468 return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00003469}
3470
3471
Steve Blockd0582a62009-12-15 09:54:21 +00003472void Heap::RecordStats(HeapStats* stats) {
3473 *stats->start_marker = 0xDECADE00;
3474 *stats->end_marker = 0xDECADE01;
3475 *stats->new_space_size = new_space_.Size();
3476 *stats->new_space_capacity = new_space_.Capacity();
3477 *stats->old_pointer_space_size = old_pointer_space_->Size();
3478 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
3479 *stats->old_data_space_size = old_data_space_->Size();
3480 *stats->old_data_space_capacity = old_data_space_->Capacity();
3481 *stats->code_space_size = code_space_->Size();
3482 *stats->code_space_capacity = code_space_->Capacity();
3483 *stats->map_space_size = map_space_->Size();
3484 *stats->map_space_capacity = map_space_->Capacity();
3485 *stats->cell_space_size = cell_space_->Size();
3486 *stats->cell_space_capacity = cell_space_->Capacity();
3487 *stats->lo_space_size = lo_space_->Size();
3488 GlobalHandles::RecordStats(stats);
3489}
3490
3491
Steve Blocka7e24c12009-10-30 11:49:00 +00003492int Heap::PromotedSpaceSize() {
3493 return old_pointer_space_->Size()
3494 + old_data_space_->Size()
3495 + code_space_->Size()
3496 + map_space_->Size()
3497 + cell_space_->Size()
3498 + lo_space_->Size();
3499}
3500
3501
3502int Heap::PromotedExternalMemorySize() {
3503 if (amount_of_external_allocated_memory_
3504 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
3505 return amount_of_external_allocated_memory_
3506 - amount_of_external_allocated_memory_at_last_global_gc_;
3507}
3508
3509
3510bool Heap::Setup(bool create_heap_objects) {
3511 // Initialize heap spaces and initial maps and objects. Whenever something
3512 // goes wrong, just return false. The caller should check the results and
3513 // call Heap::TearDown() to release allocated memory.
3514 //
3515  // If the heap is not yet configured (e.g., through the API), configure it.
3516 // Configuration is based on the flags new-space-size (really the semispace
3517 // size) and old-space-size if set or the initial values of semispace_size_
3518 // and old_generation_size_ otherwise.
3519 if (!heap_configured) {
3520 if (!ConfigureHeapDefault()) return false;
3521 }
3522
3523 // Setup memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00003524 // space. The chunk is double the size of the requested reserved
3525 // new space size to ensure that we can find a pair of semispaces that
3526 // are contiguous and aligned to their size.
3527 if (!MemoryAllocator::Setup(MaxReserved())) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00003528 void* chunk =
Steve Block3ce2e202009-11-05 08:53:23 +00003529 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003530 if (chunk == NULL) return false;
3531
3532 // Align the pair of semispaces to their size, which must be a power
3533 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00003534 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00003535 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
3536 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
3537 return false;
3538 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003539
3540 // Initialize old pointer space.
3541 old_pointer_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00003542 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003543 if (old_pointer_space_ == NULL) return false;
3544 if (!old_pointer_space_->Setup(NULL, 0)) return false;
3545
3546 // Initialize old data space.
3547 old_data_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00003548 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003549 if (old_data_space_ == NULL) return false;
3550 if (!old_data_space_->Setup(NULL, 0)) return false;
3551
3552 // Initialize the code space, set its maximum capacity to the old
3553 // generation size. It needs executable memory.
3554 // On 64-bit platform(s), we put all code objects in a 2 GB range of
3555 // virtual address space, so that they can call each other with near calls.
3556 if (code_range_size_ > 0) {
3557 if (!CodeRange::Setup(code_range_size_)) {
3558 return false;
3559 }
3560 }
3561
3562 code_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00003563 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003564 if (code_space_ == NULL) return false;
3565 if (!code_space_->Setup(NULL, 0)) return false;
3566
3567 // Initialize map space.
Leon Clarkee46be812010-01-19 14:06:41 +00003568 map_space_ = new MapSpace(FLAG_use_big_map_space
3569 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00003570 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
3571 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00003572 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003573 if (map_space_ == NULL) return false;
3574 if (!map_space_->Setup(NULL, 0)) return false;
3575
3576 // Initialize global property cell space.
Steve Block3ce2e202009-11-05 08:53:23 +00003577 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00003578 if (cell_space_ == NULL) return false;
3579 if (!cell_space_->Setup(NULL, 0)) return false;
3580
3581 // The large object code space may contain code or data. We set the memory
3582 // to be non-executable here for safety, but this means we need to enable it
3583 // explicitly when allocating large code objects.
3584 lo_space_ = new LargeObjectSpace(LO_SPACE);
3585 if (lo_space_ == NULL) return false;
3586 if (!lo_space_->Setup()) return false;
3587
3588 if (create_heap_objects) {
3589 // Create initial maps.
3590 if (!CreateInitialMaps()) return false;
3591 if (!CreateApiObjects()) return false;
3592
3593 // Create initial objects
3594 if (!CreateInitialObjects()) return false;
3595 }
3596
3597 LOG(IntEvent("heap-capacity", Capacity()));
3598 LOG(IntEvent("heap-available", Available()));
3599
Steve Block3ce2e202009-11-05 08:53:23 +00003600#ifdef ENABLE_LOGGING_AND_PROFILING
3601 // This should be called only after initial objects have been created.
3602 ProducerHeapProfile::Setup();
3603#endif
3604
Steve Blocka7e24c12009-10-30 11:49:00 +00003605 return true;
3606}
3607
3608
Steve Blockd0582a62009-12-15 09:54:21 +00003609void Heap::SetStackLimits() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003610 // On 64 bit machines, pointers are generally out of range of Smis. We write
3611 // something that looks like an out of range Smi to the GC.
3612
Steve Blockd0582a62009-12-15 09:54:21 +00003613 // Set up the special root array entries containing the stack limits.
3614 // These are actually addresses, but the tag makes the GC ignore it.
Steve Blocka7e24c12009-10-30 11:49:00 +00003615 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00003616 reinterpret_cast<Object*>(
3617 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
3618 roots_[kRealStackLimitRootIndex] =
3619 reinterpret_cast<Object*>(
3620 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00003621}
3622
3623
3624void Heap::TearDown() {
3625 GlobalHandles::TearDown();
3626
Leon Clarkee46be812010-01-19 14:06:41 +00003627 ExternalStringTable::TearDown();
3628
Steve Blocka7e24c12009-10-30 11:49:00 +00003629 new_space_.TearDown();
3630
3631 if (old_pointer_space_ != NULL) {
3632 old_pointer_space_->TearDown();
3633 delete old_pointer_space_;
3634 old_pointer_space_ = NULL;
3635 }
3636
3637 if (old_data_space_ != NULL) {
3638 old_data_space_->TearDown();
3639 delete old_data_space_;
3640 old_data_space_ = NULL;
3641 }
3642
3643 if (code_space_ != NULL) {
3644 code_space_->TearDown();
3645 delete code_space_;
3646 code_space_ = NULL;
3647 }
3648
3649 if (map_space_ != NULL) {
3650 map_space_->TearDown();
3651 delete map_space_;
3652 map_space_ = NULL;
3653 }
3654
3655 if (cell_space_ != NULL) {
3656 cell_space_->TearDown();
3657 delete cell_space_;
3658 cell_space_ = NULL;
3659 }
3660
3661 if (lo_space_ != NULL) {
3662 lo_space_->TearDown();
3663 delete lo_space_;
3664 lo_space_ = NULL;
3665 }
3666
3667 MemoryAllocator::TearDown();
3668}
3669
3670
3671void Heap::Shrink() {
3672 // Try to shrink all paged spaces.
3673 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003674 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
3675 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00003676}
3677
3678
3679#ifdef ENABLE_HEAP_PROTECTION
3680
3681void Heap::Protect() {
3682 if (HasBeenSetup()) {
3683 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003684 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3685 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00003686 }
3687}
3688
3689
3690void Heap::Unprotect() {
3691 if (HasBeenSetup()) {
3692 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003693 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3694 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00003695 }
3696}
3697
3698#endif
3699
3700
3701#ifdef DEBUG
3702
3703class PrintHandleVisitor: public ObjectVisitor {
3704 public:
3705 void VisitPointers(Object** start, Object** end) {
3706 for (Object** p = start; p < end; p++)
3707 PrintF(" handle %p to %p\n", p, *p);
3708 }
3709};
3710
3711void Heap::PrintHandles() {
3712 PrintF("Handles:\n");
3713 PrintHandleVisitor v;
3714 HandleScopeImplementer::Iterate(&v);
3715}
3716
3717#endif
3718
3719
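// The iterators below return the heap spaces in the order of the
// AllocationSpace enumeration and return NULL once all spaces of the
// requested kind have been visited.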
3720Space* AllSpaces::next() {
3721 switch (counter_++) {
3722 case NEW_SPACE:
3723 return Heap::new_space();
3724 case OLD_POINTER_SPACE:
3725 return Heap::old_pointer_space();
3726 case OLD_DATA_SPACE:
3727 return Heap::old_data_space();
3728 case CODE_SPACE:
3729 return Heap::code_space();
3730 case MAP_SPACE:
3731 return Heap::map_space();
3732 case CELL_SPACE:
3733 return Heap::cell_space();
3734 case LO_SPACE:
3735 return Heap::lo_space();
3736 default:
3737 return NULL;
3738 }
3739}
3740
3741
3742PagedSpace* PagedSpaces::next() {
3743 switch (counter_++) {
3744 case OLD_POINTER_SPACE:
3745 return Heap::old_pointer_space();
3746 case OLD_DATA_SPACE:
3747 return Heap::old_data_space();
3748 case CODE_SPACE:
3749 return Heap::code_space();
3750 case MAP_SPACE:
3751 return Heap::map_space();
3752 case CELL_SPACE:
3753 return Heap::cell_space();
3754 default:
3755 return NULL;
3756 }
3757}
3758
3759
3760
3761OldSpace* OldSpaces::next() {
3762 switch (counter_++) {
3763 case OLD_POINTER_SPACE:
3764 return Heap::old_pointer_space();
3765 case OLD_DATA_SPACE:
3766 return Heap::old_data_space();
3767 case CODE_SPACE:
3768 return Heap::code_space();
3769 default:
3770 return NULL;
3771 }
3772}
3773
3774
3775SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
3776}
3777
3778
3779SpaceIterator::~SpaceIterator() {
3780 // Delete active iterator if any.
3781 delete iterator_;
3782}
3783
3784
3785bool SpaceIterator::has_next() {
3786 // Iterate until no more spaces.
3787 return current_space_ != LAST_SPACE;
3788}
3789
3790
3791ObjectIterator* SpaceIterator::next() {
3792 if (iterator_ != NULL) {
3793 delete iterator_;
3794 iterator_ = NULL;
3795 // Move to the next space
3796 current_space_++;
3797 if (current_space_ > LAST_SPACE) {
3798 return NULL;
3799 }
3800 }
3801
3802 // Return iterator for the new current space.
3803 return CreateIterator();
3804}
3805
3806
3807// Create an iterator for the space to iterate.
3808ObjectIterator* SpaceIterator::CreateIterator() {
3809 ASSERT(iterator_ == NULL);
3810
3811 switch (current_space_) {
3812 case NEW_SPACE:
3813 iterator_ = new SemiSpaceIterator(Heap::new_space());
3814 break;
3815 case OLD_POINTER_SPACE:
3816 iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
3817 break;
3818 case OLD_DATA_SPACE:
3819 iterator_ = new HeapObjectIterator(Heap::old_data_space());
3820 break;
3821 case CODE_SPACE:
3822 iterator_ = new HeapObjectIterator(Heap::code_space());
3823 break;
3824 case MAP_SPACE:
3825 iterator_ = new HeapObjectIterator(Heap::map_space());
3826 break;
3827 case CELL_SPACE:
3828 iterator_ = new HeapObjectIterator(Heap::cell_space());
3829 break;
3830 case LO_SPACE:
3831 iterator_ = new LargeObjectIterator(Heap::lo_space());
3832 break;
3833 }
3834
3835  // Return the newly allocated iterator.
3836 ASSERT(iterator_ != NULL);
3837 return iterator_;
3838}
3839
3840
3841HeapIterator::HeapIterator() {
3842 Init();
3843}
3844
3845
3846HeapIterator::~HeapIterator() {
3847 Shutdown();
3848}
3849
3850
3851void HeapIterator::Init() {
3852 // Start the iteration.
3853 space_iterator_ = new SpaceIterator();
3854 object_iterator_ = space_iterator_->next();
3855}
3856
3857
3858void HeapIterator::Shutdown() {
3859 // Make sure the last iterator is deallocated.
3860 delete space_iterator_;
3861 space_iterator_ = NULL;
3862 object_iterator_ = NULL;
3863}
3864
3865
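// Return the next live object, moving on to the next space when the current
// object iterator is exhausted; NULL means the whole heap has been visited.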
Leon Clarked91b9f72010-01-27 17:25:45 +00003866HeapObject* HeapIterator::next() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003867 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00003868 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00003869
Leon Clarked91b9f72010-01-27 17:25:45 +00003870 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003871 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00003872 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00003873 } else {
3874    // Go through the spaces looking for one that has objects.
3875 while (space_iterator_->has_next()) {
3876 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00003877 if (HeapObject* obj = object_iterator_->next_object()) {
3878 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00003879 }
3880 }
3881 }
3882 // Done with the last space.
3883 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00003884 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00003885}
3886
3887
3888void HeapIterator::reset() {
3889 // Restart the iterator.
3890 Shutdown();
3891 Init();
3892}
3893
3894
3895#ifdef DEBUG
3896
3897static bool search_for_any_global;
3898static Object* search_target;
3899static bool found_target;
3900static List<Object*> object_stack(20);
3901
3902
3903// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
3904static const int kMarkTag = 2;
3905
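// The helpers below implement TracePathToObject and TracePathToGlobal. An
// object is marked by adding kMarkTag to its map word, the chain of objects
// leading to the current one is kept in object_stack, and the marks are
// removed again by a second, unmarking traversal.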
3906static void MarkObjectRecursively(Object** p);
3907class MarkObjectVisitor : public ObjectVisitor {
3908 public:
3909 void VisitPointers(Object** start, Object** end) {
3910    // Mark all HeapObject pointers in [start, end) recursively.
3911 for (Object** p = start; p < end; p++) {
3912 if ((*p)->IsHeapObject())
3913 MarkObjectRecursively(p);
3914 }
3915 }
3916};
3917
3918static MarkObjectVisitor mark_visitor;
3919
3920static void MarkObjectRecursively(Object** p) {
3921 if (!(*p)->IsHeapObject()) return;
3922
3923 HeapObject* obj = HeapObject::cast(*p);
3924
3925 Object* map = obj->map();
3926
3927 if (!map->IsHeapObject()) return; // visited before
3928
3929 if (found_target) return; // stop if target found
3930 object_stack.Add(obj);
3931 if ((search_for_any_global && obj->IsJSGlobalObject()) ||
3932 (!search_for_any_global && (obj == search_target))) {
3933 found_target = true;
3934 return;
3935 }
3936
3937 // not visited yet
3938 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
3939
3940 Address map_addr = map_p->address();
3941
3942 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
3943
3944 MarkObjectRecursively(&map);
3945
3946 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
3947 &mark_visitor);
3948
3949 if (!found_target) // don't pop if found the target
3950 object_stack.RemoveLast();
3951}
3952
3953
3954static void UnmarkObjectRecursively(Object** p);
3955class UnmarkObjectVisitor : public ObjectVisitor {
3956 public:
3957 void VisitPointers(Object** start, Object** end) {
3958    // Unmark all HeapObject pointers in [start, end) recursively.
3959 for (Object** p = start; p < end; p++) {
3960 if ((*p)->IsHeapObject())
3961 UnmarkObjectRecursively(p);
3962 }
3963 }
3964};
3965
3966static UnmarkObjectVisitor unmark_visitor;
3967
3968static void UnmarkObjectRecursively(Object** p) {
3969 if (!(*p)->IsHeapObject()) return;
3970
3971 HeapObject* obj = HeapObject::cast(*p);
3972
3973 Object* map = obj->map();
3974
3975 if (map->IsHeapObject()) return; // unmarked already
3976
3977 Address map_addr = reinterpret_cast<Address>(map);
3978
3979 map_addr -= kMarkTag;
3980
3981 ASSERT_TAG_ALIGNED(map_addr);
3982
3983 HeapObject* map_p = HeapObject::FromAddress(map_addr);
3984
3985 obj->set_map(reinterpret_cast<Map*>(map_p));
3986
3987 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
3988
3989 obj->IterateBody(Map::cast(map_p)->instance_type(),
3990 obj->SizeFromMap(Map::cast(map_p)),
3991 &unmark_visitor);
3992}
3993
3994
3995static void MarkRootObjectRecursively(Object** root) {
3996 if (search_for_any_global) {
3997 ASSERT(search_target == NULL);
3998 } else {
3999 ASSERT(search_target->IsHeapObject());
4000 }
4001 found_target = false;
4002 object_stack.Clear();
4003
4004 MarkObjectRecursively(root);
4005 UnmarkObjectRecursively(root);
4006
4007 if (found_target) {
4008 PrintF("=====================================\n");
4009 PrintF("==== Path to object ====\n");
4010 PrintF("=====================================\n\n");
4011
4012 ASSERT(!object_stack.is_empty());
4013 for (int i = 0; i < object_stack.length(); i++) {
4014 if (i > 0) PrintF("\n |\n |\n V\n\n");
4015 Object* obj = object_stack[i];
4016 obj->Print();
4017 }
4018 PrintF("=====================================\n");
4019 }
4020}
4021
4022
4023// Helper class for visiting HeapObjects recursively.
4024class MarkRootVisitor: public ObjectVisitor {
4025 public:
4026 void VisitPointers(Object** start, Object** end) {
4027 // Visit all HeapObject pointers in [start, end)
4028 for (Object** p = start; p < end; p++) {
4029 if ((*p)->IsHeapObject())
4030 MarkRootObjectRecursively(p);
4031 }
4032 }
4033};
4034
4035
4036// Triggers a depth-first traversal of reachable objects from roots
4037// and finds a path to a specific heap object and prints it.
Leon Clarkee46be812010-01-19 14:06:41 +00004038void Heap::TracePathToObject(Object* target) {
4039 search_target = target;
Steve Blocka7e24c12009-10-30 11:49:00 +00004040 search_for_any_global = false;
4041
4042 MarkRootVisitor root_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00004043 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00004044}
4045
4046
4047// Triggers a depth-first traversal of reachable objects from roots
4048// and finds a path to any global object and prints it. Useful for
4049// determining the source for leaks of global objects.
4050void Heap::TracePathToGlobal() {
4051 search_target = NULL;
4052 search_for_any_global = true;
4053
4054 MarkRootVisitor root_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00004055 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00004056}
4057#endif
4058
4059
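// The tracer samples the heap size and the current time when a collection
// starts and, if FLAG_trace_gc is set, prints a one-line summary when the
// collection finishes.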
4060GCTracer::GCTracer()
4061 : start_time_(0.0),
4062 start_size_(0.0),
4063 gc_count_(0),
4064 full_gc_count_(0),
4065 is_compacting_(false),
4066 marked_count_(0) {
4067 // These two fields reflect the state of the previous full collection.
4068 // Set them before they are changed by the collector.
4069 previous_has_compacted_ = MarkCompactCollector::HasCompacted();
4070 previous_marked_count_ = MarkCompactCollector::previous_marked_count();
4071 if (!FLAG_trace_gc) return;
4072 start_time_ = OS::TimeCurrentMillis();
4073 start_size_ = SizeOfHeapObjects();
4074}
4075
4076
4077GCTracer::~GCTracer() {
4078 if (!FLAG_trace_gc) return;
4079 // Printf ONE line iff flag is set.
4080 PrintF("%s %.1f -> %.1f MB, %d ms.\n",
4081 CollectorString(),
4082 start_size_, SizeOfHeapObjects(),
4083 static_cast<int>(OS::TimeCurrentMillis() - start_time_));
4084
4085#if defined(ENABLE_LOGGING_AND_PROFILING)
4086 Heap::PrintShortHeapStatistics();
4087#endif
4088}
4089
4090
4091const char* GCTracer::CollectorString() {
4092 switch (collector_) {
4093 case SCAVENGER:
4094 return "Scavenge";
4095 case MARK_COMPACTOR:
4096 return MarkCompactCollector::HasCompacted() ? "Mark-compact"
4097 : "Mark-sweep";
4098 }
4099 return "Unknown GC";
4100}
4101
4102
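// The keyed lookup cache maps a (map, symbol name) pair to a field offset.
// The hash xors the shifted map pointer with the string hash and masks the
// result to the cache capacity; only symbols are ever inserted.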
4103int KeyedLookupCache::Hash(Map* map, String* name) {
4104 // Uses only lower 32 bits if pointers are larger.
4105 uintptr_t addr_hash =
Leon Clarkee46be812010-01-19 14:06:41 +00004106 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
4107 return (addr_hash ^ name->Hash()) & kCapacityMask;
Steve Blocka7e24c12009-10-30 11:49:00 +00004108}
4109
4110
4111int KeyedLookupCache::Lookup(Map* map, String* name) {
4112 int index = Hash(map, name);
4113 Key& key = keys_[index];
4114 if ((key.map == map) && key.name->Equals(name)) {
4115 return field_offsets_[index];
4116 }
4117 return -1;
4118}
4119
4120
4121void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
4122 String* symbol;
4123 if (Heap::LookupSymbolIfExists(name, &symbol)) {
4124 int index = Hash(map, symbol);
4125 Key& key = keys_[index];
4126 key.map = map;
4127 key.name = symbol;
4128 field_offsets_[index] = field_offset;
4129 }
4130}
4131
4132
4133void KeyedLookupCache::Clear() {
4134 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
4135}
4136
4137
4138KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
4139
4140
4141int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
4142
4143
4144void DescriptorLookupCache::Clear() {
4145 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
4146}
4147
4148
4149DescriptorLookupCache::Key
4150DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
4151
4152int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
4153
4154
4155#ifdef DEBUG
4156bool Heap::GarbageCollectionGreedyCheck() {
4157 ASSERT(FLAG_gc_greedy);
4158 if (Bootstrapper::IsActive()) return true;
4159 if (disallow_allocation_failure()) return true;
4160 return CollectGarbage(0, NEW_SPACE);
4161}
4162#endif
4163
4164
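// Fill every cache entry with an input bit pattern that the FPU never
// produces, so that lookups cannot match an entry before it has been
// written.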
4165TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
4166 : type_(t) {
4167 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
4168 uint32_t in1 = 0xffffffffu; // generated by the FPU.
4169 for (int i = 0; i < kCacheSize; i++) {
4170 elements_[i].in[0] = in0;
4171 elements_[i].in[1] = in1;
4172 elements_[i].output = NULL;
4173 }
4174}
4175
4176
4177TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
4178
4179
4180void TranscendentalCache::Clear() {
4181 for (int i = 0; i < kNumberOfCaches; i++) {
4182 if (caches_[i] != NULL) {
4183 delete caches_[i];
4184 caches_[i] = NULL;
4185 }
4186 }
4187}
4188
4189
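// Drop table entries that have been cleared to the null value and move
// strings that are no longer in new space over to the old space list.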
Leon Clarkee46be812010-01-19 14:06:41 +00004190void ExternalStringTable::CleanUp() {
4191 int last = 0;
4192 for (int i = 0; i < new_space_strings_.length(); ++i) {
4193 if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
4194 if (Heap::InNewSpace(new_space_strings_[i])) {
4195 new_space_strings_[last++] = new_space_strings_[i];
4196 } else {
4197 old_space_strings_.Add(new_space_strings_[i]);
4198 }
4199 }
4200 new_space_strings_.Rewind(last);
4201 last = 0;
4202 for (int i = 0; i < old_space_strings_.length(); ++i) {
4203 if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
4204 ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
4205 old_space_strings_[last++] = old_space_strings_[i];
4206 }
4207 old_space_strings_.Rewind(last);
4208 Verify();
4209}
4210
4211
4212void ExternalStringTable::TearDown() {
4213 new_space_strings_.Free();
4214 old_space_strings_.Free();
4215}
4216
4217
4218List<Object*> ExternalStringTable::new_space_strings_;
4219List<Object*> ExternalStringTable::old_space_strings_;
4220
Steve Blocka7e24c12009-10-30 11:49:00 +00004221} } // namespace v8::internal