// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "mark-compact.h"
#include "natives.h"
#include "scanner.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif


namespace v8 {
namespace internal {


String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];


NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

static const int kMinimumPromotionLimit = 2*MB;
static const int kMinimumAllocationLimit = 8*MB;

int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
int Heap::max_semispace_size_ = 2*MB;
int Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
size_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
int Heap::max_semispace_size_ = 16*MB;
int Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
size_t Heap::code_range_size_ = 512*MB;
#else
int Heap::max_semispace_size_ = 8*MB;
int Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
size_t Heap::code_range_size_ = 0;
#endif

// The snapshot semispace size will be the default semispace size if
// snapshotting is used and will be the requested semispace size as
// set up by ConfigureHeap otherwise.
int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;

List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.

// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
int Heap::survived_since_last_expansion_ = 0;
int Heap::external_allocation_limit_ = 0;

Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;

GCTracer* Heap::tracer_ = NULL;

int Heap::unflattened_strings_length_ = 0;

int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;

int Heap::young_survivors_after_last_gc_ = 0;
int Heap::high_survival_rate_period_length_ = 0;
double Heap::survival_rate_ = 0;
Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif // DEBUG

int GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
int GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;

int Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


int Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}


int Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}
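
// Note (added for clarity, not part of the original file): the checks above
// form a priority cascade. An explicit non-new-space request, the promotion
// limit, old-generation exhaustion, and insufficient uncommitted memory each
// force MARK_COMPACTOR; only when none of them fires is the cheaper
// SCAVENGER chosen.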


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator, used: %8d, available: %8d\n",
         MemoryAllocator::Size(),
         MemoryAllocator::Available());
  PrintF("New space, used: %8d, available: %8d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers, used: %8d, available: %8d, waste: %8d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space, used: %8d, available: %8d, waste: %8d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space, used: %8d, available: %8d, waste: %8d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space, used: %8d, available: %8d, waste: %8d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space, used: %8d, available: %8d, waste: %8d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8d, available: %8d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  TranscendentalCache::Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}

int Heap::SizeOfObjects() {
  int total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->Size();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(SizeOfObjects());

  Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
  Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(force_compaction);
  CollectGarbage(0, OLD_POINTER_SPACE);
  MarkCompactCollector::SetForceCompaction(false);
}
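
// Usage sketch (illustrative, not from the original file): callers that want
// to reclaim as much memory as possible simply pass the compaction flag, e.g.
//
//   Heap::CollectAllGarbage(false);  // full mark-sweep, compact only if the
//                                    // collector decides to
//   Heap::CollectAllGarbage(true);   // additionally force compaction
//
// Both calls end up in a MARK_COMPACTOR cycle because OLD_POINTER_SPACE is
// passed to CollectGarbage above.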


bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    GarbageCollector collector = SelectGarbageCollector(space);
    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    PerformGarbageCollection(space, collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  switch (space) {
    case NEW_SPACE:
      return new_space_.Available() >= requested_size;
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Available() >= requested_size;
    case OLD_DATA_SPACE:
      return old_data_space_->Available() >= requested_size;
    case CODE_SPACE:
      return code_space_->Available() >= requested_size;
    case MAP_SPACE:
      return map_space_->Available() >= requested_size;
    case CELL_SPACE:
      return cell_space_->Available() >= requested_size;
    case LO_SPACE:
      return lo_space_->Available() >= requested_size;
  }
  return false;
}


void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  SymbolTableVerifier() { }
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  Heap::symbol_table()->IterateElements(&verifier);
#endif // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(new_space_size, NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(code_space_size, CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(map_space_size, MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(cell_space_size, CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation. This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(large_object_size, LO_SPACE);
      gc_performed = true;
    }
  }
}
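
// Note (added for clarity): the loop above retries until every space can
// reserve its requested size in a single pass. A collection triggered for one
// space can consume old-generation headroom needed by another, so all spaces
// are re-checked after each GC instead of being handled independently.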


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


class ClearThreadJSFunctionResultCachesVisitor: public ThreadVisitor {
  virtual void VisitThread(ThreadLocalTop* top) {
    Context* context = top->context_;
    if (context == NULL) return;

    FixedArray* caches =
        context->global()->global_context()->jsfunction_result_caches();
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
  }
};


void Heap::ClearJSFunctionResultCaches() {
  if (Bootstrapper::IsActive()) return;
  ClearThreadJSFunctionResultCachesVisitor visitor;
  ThreadManager::IterateArchivedThreads(&visitor);
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}
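
// Worked example (illustrative): with a 2 MB new space at the start of the GC
// and young_survivors_after_last_gc_ at 1.5 MB, survival_rate is 75. If the
// previous survival_rate_ was 40, survival_rate_diff is 40 - 75 = -35; if
// that is below -kYoungSurvivalRateAllowedDeviation the trend is set to
// INCREASING, otherwise to STABLE. high_survival_rate_period_length_ keeps
// growing only while the rate also stays above kYoungSurvivalRateThreshold
// (both constants are defined in heap.h).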

void Heap::PerformGarbageCollection(AllocationSpace space,
                                    GarbageCollector collector,
                                    GCTracer* tracer) {
  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->Size();

  if (collector == MARK_COMPACTOR) {
    if (FLAG_flush_code) {
      // Flush all potentially unused code.
      FlushCode();
    }

    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    int old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that the mutator is either building or
      // modifying a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  Counters::objs_since_last_young.Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    GlobalHandles::PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactCollector::Prepare(tracer);

  bool is_compacting = MarkCompactCollector::IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  MarkCompactCollector::CollectGarbage();

  MarkCompactEpilogue(is_compacting);

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  KeyedLookupCache::Clear();
  ContextSlotCache::Clear();
  DescriptorLookupCache::Clear();

  CompilationCache::MarkCompactPrologue();

  Top::MarkCompactPrologue(is_compacting);
  ThreadManager::MarkCompactPrologue(is_compacting);

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();
}


void Heap::MarkCompactEpilogue(bool is_compacting) {
  Top::MarkCompactEpilogue(is_compacting);
  ThreadManager::MarkCompactEpilogue(is_compacting);
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = code_space_->FindObject(a);
  if (obj->IsFailure()) {
    obj = lo_space_->FindObject(a);
  }
  ASSERT(!obj->IsFailure());
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  void Initialize(Address start_address) {
    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
  }

  bool is_empty() { return front_ <= rear_; }

  void insert(HeapObject* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
    // Assert no overflow into live objects.
    ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
  }

  void remove(HeapObject** target, int* size) {
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    ASSERT(front_ >= rear_);
  }

 private:
  // The front of the queue is higher in memory than the rear.
  intptr_t* front_;
  intptr_t* rear_;
};
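
// Note (added for clarity): front_ and rear_ both start at the top of
// to-space and rear_ moves downwards as two-word entries (target pointer and
// object size) are pushed, while newly copied objects grow upwards from the
// bottom of to-space; the ASSERT in insert() checks that the two regions
// never overlap.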


// Shared state read by the scavenge collector and set by ScavengeObject.
static PromotionQueue promotion_queue;


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(Heap::code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(Heap::old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  DescriptorLookupCache::Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  int survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue.Initialize(new_space_.ToSpaceHigh());

  ScavengeVisitor scavenge_visitor;
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation. By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size());

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  ExternalStringTable::Verify();

  if (ExternalStringTable::new_space_strings_.is_empty()) return;

  Object** start = &ExternalStringTable::new_space_strings_[0];
  Object** end = start + ExternalStringTable::new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(Heap::InFromSpace(*p));
    String* target = updater_func(p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (Heap::InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      ExternalStringTable::AddOldString(target);
    }
  }

  ASSERT(last <= end);
  ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
}


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      Map* map = object->map();
      int size = object->SizeFromMap(map);
      object->IterateBody(map->instance_type(), size, scavenge_visitor);
      new_space_front += size;
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue.remove(&target, &size);

      // Promoted object might be already partially visited
      // during dirty regions iteration. Thus we search specifically
      // for pointers to the from semispace instead of looking for pointers
      // to new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
static void RecordCopiedObject(HeapObject* obj) {
  bool should_record = false;
#ifdef DEBUG
  should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  should_record = should_record || FLAG_log_gc;
#endif
  if (should_record) {
    if (Heap::new_space()->Contains(obj)) {
      Heap::new_space()->RecordAllocation(obj);
    } else {
      Heap::new_space()->RecordPromotion(obj);
    }
  }
}
#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
inline static HeapObject* MigrateObject(HeapObject* source,
                                        HeapObject* target,
                                        int size) {
  // Copy the content of source to target.
  Heap::CopyBlock(target->address(), source->address(), size);

  // Set the forwarding address.
  source->set_map_word(MapWord::FromForwardingAddress(target));

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Update NewSpace stats if necessary.
  RecordCopiedObject(target);
#endif
  HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));

  return target;
}
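
// Note (added for clarity): after MigrateObject the from-space original is
// dead; only its map word still matters, since it now carries the forwarding
// address that ScavengeObject and
// UpdateNewSpaceReferenceInExternalStringTableEntry use to redirect any
// remaining references to the new copy.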


enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
enum SizeRestriction { SMALL, UNKNOWN_SIZE };


template<ObjectContents object_contents, SizeRestriction size_restriction>
static inline void EvacuateObject(Map* map,
                                  HeapObject** slot,
                                  HeapObject* object,
                                  int object_size) {
  ASSERT((size_restriction != SMALL) ||
         (object_size <= Page::kMaxHeapObjectSize));
  ASSERT(object->Size() == object_size);

  if (Heap::ShouldBePromoted(object->address(), object_size)) {
    Object* result;

    if ((size_restriction != SMALL) &&
        (object_size > Page::kMaxHeapObjectSize)) {
      result = Heap::lo_space()->AllocateRawFixedArray(object_size);
    } else {
      if (object_contents == DATA_OBJECT) {
        result = Heap::old_data_space()->AllocateRaw(object_size);
      } else {
        result = Heap::old_pointer_space()->AllocateRaw(object_size);
      }
    }

    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      *slot = MigrateObject(object, target, object_size);

      if (object_contents == POINTER_OBJECT) {
        promotion_queue.insert(target, object_size);
      }

      Heap::tracer()->increment_promoted_objects_size(object_size);
      return;
    }
  }
  Object* result = Heap::new_space()->AllocateRaw(object_size);
  ASSERT(!result->IsFailure());
  *slot = MigrateObject(object, HeapObject::cast(result), object_size);
  return;
}
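
// Note (added for clarity): the template parameters specialize EvacuateObject
// at compile time. object_contents decides whether a promoted object has to
// be queued for later pointer scanning (POINTER_OBJECT) or not (DATA_OBJECT),
// and UNKNOWN_SIZE permits routing objects larger than
// Page::kMaxHeapObjectSize into the large object space; SMALL callers promise
// the object fits in a regular page.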


template<int object_size_in_words, ObjectContents object_contents>
static inline void EvacuateObjectOfFixedSize(Map* map,
                                             HeapObject** slot,
                                             HeapObject* object) {
  const int object_size = object_size_in_words << kPointerSizeLog2;
  EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
}


template<ObjectContents object_contents>
static inline void EvacuateObjectOfFixedSize(Map* map,
                                             HeapObject** slot,
                                             HeapObject* object) {
  int object_size = map->instance_size();
  EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
}


static inline void EvacuateFixedArray(Map* map,
                                      HeapObject** slot,
                                      HeapObject* object) {
  int object_size = FixedArray::cast(object)->FixedArraySize();
  EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
}


static inline void EvacuateByteArray(Map* map,
                                     HeapObject** slot,
                                     HeapObject* object) {
  int object_size = ByteArray::cast(object)->ByteArraySize();
  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
}


static Scavenger GetScavengerForSize(int object_size,
                                     ObjectContents object_contents) {
  ASSERT(IsAligned(object_size, kPointerSize));
  ASSERT(object_size < Page::kMaxHeapObjectSize);

  switch (object_size >> kPointerSizeLog2) {
#define CASE(n)                                                 \
    case n:                                                     \
      if (object_contents == DATA_OBJECT) {                     \
        return static_cast<Scavenger>(                          \
            &EvacuateObjectOfFixedSize<n, DATA_OBJECT>);        \
      } else {                                                  \
        return static_cast<Scavenger>(                          \
            &EvacuateObjectOfFixedSize<n, POINTER_OBJECT>);     \
      }

    CASE(1);
    CASE(2);
    CASE(3);
    CASE(4);
    CASE(5);
    CASE(6);
    CASE(7);
    CASE(8);
    CASE(9);
    CASE(10);
    CASE(11);
    CASE(12);
    CASE(13);
    CASE(14);
    CASE(15);
    CASE(16);
    default:
      if (object_contents == DATA_OBJECT) {
        return static_cast<Scavenger>(&EvacuateObjectOfFixedSize<DATA_OBJECT>);
      } else {
        return static_cast<Scavenger>(
            &EvacuateObjectOfFixedSize<POINTER_OBJECT>);
      }

#undef CASE
  }
}
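
// Note (added for clarity): the CASE table above covers objects of 1 to 16
// words and bakes the size into the returned evacuator as a template
// argument; larger objects fall back to the variant that reads
// map->instance_size() at scavenge time. Both paths assume sizes below
// Page::kMaxHeapObjectSize, as asserted on entry.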


static inline void EvacuateSeqAsciiString(Map* map,
                                          HeapObject** slot,
                                          HeapObject* object) {
  int object_size = SeqAsciiString::cast(object)->
      SeqAsciiStringSize(map->instance_type());
  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
}


static inline void EvacuateSeqTwoByteString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
  int object_size = SeqTwoByteString::cast(object)->
      SeqTwoByteStringSize(map->instance_type());
  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
}


static inline bool IsShortcutCandidate(int type) {
  return ((type & kShortcutTypeMask) == kShortcutTypeTag);
}


static inline void EvacuateShortcutCandidate(Map* map,
                                             HeapObject** slot,
                                             HeapObject* object) {
  ASSERT(IsShortcutCandidate(map->instance_type()));

  if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
    HeapObject* first =
        HeapObject::cast(ConsString::cast(object)->unchecked_first());

    *slot = first;

    if (!Heap::InNewSpace(first)) {
      object->set_map_word(MapWord::FromForwardingAddress(first));
      return;
    }

    MapWord first_word = first->map_word();
    if (first_word.IsForwardingAddress()) {
      HeapObject* target = first_word.ToForwardingAddress();

      *slot = target;
      object->set_map_word(MapWord::FromForwardingAddress(target));
      return;
    }

    first->map()->Scavenge(slot, first);
    object->set_map_word(MapWord::FromForwardingAddress(*slot));
    return;
  }

  int object_size = ConsString::kSize;
  EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
}
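
// Illustrative example (not from the original source): for a cons string
// whose second part is the empty string, e.g. the result of "foo" + "", the
// slot is rewritten to point directly at the "foo" component and the cons
// cell itself is never copied. Only when the second part is non-empty does
// the code fall through and evacuate the full ConsString::kSize object.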


Scavenger Heap::GetScavenger(int instance_type, int instance_size) {
  if (instance_type < FIRST_NONSTRING_TYPE) {
    switch (instance_type & kStringRepresentationMask) {
      case kSeqStringTag:
        if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
          return &EvacuateSeqAsciiString;
        } else {
          return &EvacuateSeqTwoByteString;
        }

      case kConsStringTag:
        if (IsShortcutCandidate(instance_type)) {
          return &EvacuateShortcutCandidate;
        } else {
          ASSERT(instance_size == ConsString::kSize);
          return GetScavengerForSize(ConsString::kSize, POINTER_OBJECT);
        }

      case kExternalStringTag:
        ASSERT(instance_size == ExternalString::kSize);
        return GetScavengerForSize(ExternalString::kSize, DATA_OBJECT);
    }
    UNREACHABLE();
  }

  switch (instance_type) {
    case BYTE_ARRAY_TYPE:
      return reinterpret_cast<Scavenger>(&EvacuateByteArray);

    case FIXED_ARRAY_TYPE:
      return reinterpret_cast<Scavenger>(&EvacuateFixedArray);

    case JS_OBJECT_TYPE:
    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
    case JS_VALUE_TYPE:
    case JS_ARRAY_TYPE:
    case JS_REGEXP_TYPE:
    case JS_FUNCTION_TYPE:
    case JS_GLOBAL_PROXY_TYPE:
    case JS_GLOBAL_OBJECT_TYPE:
    case JS_BUILTINS_OBJECT_TYPE:
      return GetScavengerForSize(instance_size, POINTER_OBJECT);

    case ODDBALL_TYPE:
      return NULL;

    case PROXY_TYPE:
      return GetScavengerForSize(Proxy::kSize, DATA_OBJECT);

    case MAP_TYPE:
      return NULL;

    case CODE_TYPE:
      return NULL;

    case JS_GLOBAL_PROPERTY_CELL_TYPE:
      return NULL;

    case HEAP_NUMBER_TYPE:
    case FILLER_TYPE:
    case PIXEL_ARRAY_TYPE:
    case EXTERNAL_BYTE_ARRAY_TYPE:
    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
    case EXTERNAL_SHORT_ARRAY_TYPE:
    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
    case EXTERNAL_INT_ARRAY_TYPE:
    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
    case EXTERNAL_FLOAT_ARRAY_TYPE:
      return GetScavengerForSize(instance_size, DATA_OBJECT);

    case SHARED_FUNCTION_INFO_TYPE:
      return GetScavengerForSize(SharedFunctionInfo::kAlignedSize,
                                 POINTER_OBJECT);

#define MAKE_STRUCT_CASE(NAME, Name, name) \
    case NAME##_TYPE:
      STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
      return GetScavengerForSize(instance_size, POINTER_OBJECT);
    default:
      UNREACHABLE();
      return NULL;
  }
}
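
// Note (added for clarity): GetScavenger runs once per map, when the map is
// created in AllocatePartialMap/AllocateMap below, so the type dispatch above
// is paid at map-creation time rather than per scavenged object. A NULL
// scavenger marks types such as maps, code objects and oddballs that are
// allocated outside the new space and are therefore never evacuated by the
// scavenger.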


void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  ASSERT(InFromSpace(object));
  MapWord first_word = object->map_word();
  ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  map->Scavenge(p, object);
}


void Heap::ScavengePointer(HeapObject** p) {
  ScavengeObject(p, *p);
}


Object* Heap::AllocatePartialMap(InstanceType instance_type,
                                 int instance_size) {
  Object* result = AllocateRawMap();
  if (result->IsFailure()) return result;

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->
      set_scavenger(GetScavenger(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  return result;
}


Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
  Object* result = AllocateRawMap();
  if (result->IsFailure()) return result;

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map(meta_map());
  map->set_instance_type(instance_type);
  map->set_scavenger(GetScavenger(instance_type, instance_size));
  map->set_prototype(null_value());
  map->set_constructor(null_value());
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->set_instance_descriptors(empty_descriptor_array());
  map->set_code_cache(empty_fixed_array());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));

  // If the map object is aligned fill the padding area with Smi 0 objects.
  if (Map::kPadStart < Map::kSize) {
    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
           0,
           Map::kSize - Map::kPadStart);
  }
  return map;
}


Object* Heap::AllocateCodeCache() {
  Object* result = AllocateStruct(CODE_CACHE_TYPE);
  if (result->IsFailure()) return result;
  CodeCache* code_cache = CodeCache::cast(result);
  code_cache->set_default_cache(empty_fixed_array());
  code_cache->set_normal_type_cache(undefined_value());
  return code_cache;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
  {contents, k##name##RootIndex},
  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};


bool Heap::CreateInitialMaps() {
  Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
  if (obj->IsFailure()) return false;
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
  if (obj->IsFailure()) return false;
  set_fixed_array_map(Map::cast(obj));

  obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
  if (obj->IsFailure()) return false;
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  set_empty_fixed_array(FixedArray::cast(obj));

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  set_null_value(obj);

  // Allocate the empty descriptor array.
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_instance_descriptors(empty_descriptor_array());
  meta_map()->set_code_cache(empty_fixed_array());

  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
  fixed_array_map()->set_code_cache(empty_fixed_array());

  oddball_map()->set_instance_descriptors(empty_descriptor_array());
  oddball_map()->set_code_cache(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
  if (obj->IsFailure()) return false;
  set_heap_number_map(Map::cast(obj));

  obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
  if (obj->IsFailure()) return false;
  set_proxy_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    obj = AllocateMap(entry.type, entry.size);
    if (obj->IsFailure()) return false;
    roots_[entry.index] = Map::cast(obj);
  }

  obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_byte_array_map(Map::cast(obj));

  obj = AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_pixel_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_byte_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_unsigned_byte_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_short_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_unsigned_short_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_int_array_map(Map::cast(obj));
1589
1590 obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1591 ExternalArray::kAlignedSize);
1592 if (obj->IsFailure()) return false;
1593 set_external_unsigned_int_array_map(Map::cast(obj));
1594
1595 obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1596 ExternalArray::kAlignedSize);
1597 if (obj->IsFailure()) return false;
1598 set_external_float_array_map(Map::cast(obj));
1599
Steve Blocka7e24c12009-10-30 11:49:00 +00001600 obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
1601 if (obj->IsFailure()) return false;
1602 set_code_map(Map::cast(obj));
1603
1604 obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1605 JSGlobalPropertyCell::kSize);
1606 if (obj->IsFailure()) return false;
1607 set_global_property_cell_map(Map::cast(obj));
1608
1609 obj = AllocateMap(FILLER_TYPE, kPointerSize);
1610 if (obj->IsFailure()) return false;
1611 set_one_pointer_filler_map(Map::cast(obj));
1612
1613 obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1614 if (obj->IsFailure()) return false;
1615 set_two_pointer_filler_map(Map::cast(obj));
1616
1617 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1618 const StructTable& entry = struct_table[i];
1619 obj = AllocateMap(entry.type, entry.size);
1620 if (obj->IsFailure()) return false;
1621 roots_[entry.index] = Map::cast(obj);
1622 }
1623
1624 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1625 if (obj->IsFailure()) return false;
1626 set_hash_table_map(Map::cast(obj));
1627
1628 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1629 if (obj->IsFailure()) return false;
1630 set_context_map(Map::cast(obj));
1631
1632 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1633 if (obj->IsFailure()) return false;
1634 set_catch_context_map(Map::cast(obj));
1635
1636 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1637 if (obj->IsFailure()) return false;
1638 set_global_context_map(Map::cast(obj));
1639
Steve Block6ded16b2010-05-10 14:33:55 +01001640 obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1641 SharedFunctionInfo::kAlignedSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00001642 if (obj->IsFailure()) return false;
1643 set_shared_function_info_map(Map::cast(obj));
1644
1645 ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
1646 return true;
1647}
1648
1649
1650Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
1651 // Statically ensure that it is safe to allocate heap numbers in paged
1652 // spaces.
1653 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1654 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1655
Steve Blocka7e24c12009-10-30 11:49:00 +00001656 Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1657 if (result->IsFailure()) return result;
1658
1659 HeapObject::cast(result)->set_map(heap_number_map());
1660 HeapNumber::cast(result)->set_value(value);
1661 return result;
1662}
1663
1664
1665Object* Heap::AllocateHeapNumber(double value) {
1666   // Use the general version if we're forced to always allocate.
1667 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1668
1669 // This version of AllocateHeapNumber is optimized for
1670 // allocation in new space.
1671 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1672 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
1673 Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
1674 if (result->IsFailure()) return result;
1675 HeapObject::cast(result)->set_map(heap_number_map());
1676 HeapNumber::cast(result)->set_value(value);
1677 return result;
1678}
1679
1680
1681Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1682 Object* result = AllocateRawCell();
1683 if (result->IsFailure()) return result;
1684 HeapObject::cast(result)->set_map(global_property_cell_map());
1685 JSGlobalPropertyCell::cast(result)->set_value(value);
1686 return result;
1687}
1688
1689
Steve Block6ded16b2010-05-10 14:33:55 +01001690Object* Heap::CreateOddball(const char* to_string,
Steve Blocka7e24c12009-10-30 11:49:00 +00001691 Object* to_number) {
Steve Block6ded16b2010-05-10 14:33:55 +01001692 Object* result = Allocate(oddball_map(), OLD_DATA_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00001693 if (result->IsFailure()) return result;
1694 return Oddball::cast(result)->Initialize(to_string, to_number);
1695}
1696
1697
1698bool Heap::CreateApiObjects() {
1699 Object* obj;
1700
1701 obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1702 if (obj->IsFailure()) return false;
1703 set_neander_map(Map::cast(obj));
1704
1705 obj = Heap::AllocateJSObjectFromMap(neander_map());
1706 if (obj->IsFailure()) return false;
1707 Object* elements = AllocateFixedArray(2);
1708 if (elements->IsFailure()) return false;
1709 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1710 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1711 set_message_listeners(JSObject::cast(obj));
1712
1713 return true;
1714}
1715
1716
1717void Heap::CreateCEntryStub() {
1718 CEntryStub stub(1);
1719 set_c_entry_code(*stub.GetCode());
1720}
1721
1722
Steve Block6ded16b2010-05-10 14:33:55 +01001723#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001724void Heap::CreateRegExpCEntryStub() {
1725 RegExpCEntryStub stub;
1726 set_re_c_entry_code(*stub.GetCode());
1727}
1728#endif
1729
1730
Steve Blocka7e24c12009-10-30 11:49:00 +00001731void Heap::CreateJSEntryStub() {
1732 JSEntryStub stub;
1733 set_js_entry_code(*stub.GetCode());
1734}
1735
1736
1737void Heap::CreateJSConstructEntryStub() {
1738 JSConstructEntryStub stub;
1739 set_js_construct_entry_code(*stub.GetCode());
1740}
1741
1742
1743void Heap::CreateFixedStubs() {
1744 // Here we create roots for fixed stubs. They are needed at GC
1745 // for cooking and uncooking (check out frames.cc).
1746   // This eliminates the need for doing dictionary lookups in the
1747 // stub cache for these stubs.
1748 HandleScope scope;
1749   // gcc-4.4 has problems generating correct code for the following snippet:
1750 // { CEntryStub stub;
1751 // c_entry_code_ = *stub.GetCode();
1752 // }
Leon Clarke4515c472010-02-03 11:58:03 +00001753 // { DebuggerStatementStub stub;
1754 // debugger_statement_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001755 // }
1756   // To work around the problem, make separate functions without inlining.
1757 Heap::CreateCEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001758 Heap::CreateJSEntryStub();
1759 Heap::CreateJSConstructEntryStub();
Steve Block6ded16b2010-05-10 14:33:55 +01001760#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001761 Heap::CreateRegExpCEntryStub();
1762#endif
1763}
1764
1765
1766bool Heap::CreateInitialObjects() {
1767 Object* obj;
1768
1769 // The -0 value must be set before NumberFromDouble works.
1770 obj = AllocateHeapNumber(-0.0, TENURED);
1771 if (obj->IsFailure()) return false;
1772 set_minus_zero_value(obj);
1773 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1774
1775 obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1776 if (obj->IsFailure()) return false;
1777 set_nan_value(obj);
1778
1779 obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1780 if (obj->IsFailure()) return false;
1781 set_undefined_value(obj);
1782 ASSERT(!InNewSpace(undefined_value()));
1783
1784 // Allocate initial symbol table.
1785 obj = SymbolTable::Allocate(kInitialSymbolTableSize);
1786 if (obj->IsFailure()) return false;
1787 // Don't use set_symbol_table() due to asserts.
1788 roots_[kSymbolTableRootIndex] = obj;
1789
1790   // Assign the print strings for oddballs after creating the symbol table.
1791 Object* symbol = LookupAsciiSymbol("undefined");
1792 if (symbol->IsFailure()) return false;
1793 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
1794 Oddball::cast(undefined_value())->set_to_number(nan_value());
1795
Steve Blocka7e24c12009-10-30 11:49:00 +00001796   // Initialize the null_value (allocated earlier in CreateInitialMaps).
1797 obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
1798 if (obj->IsFailure()) return false;
1799
Steve Block6ded16b2010-05-10 14:33:55 +01001800 obj = CreateOddball("true", Smi::FromInt(1));
Steve Blocka7e24c12009-10-30 11:49:00 +00001801 if (obj->IsFailure()) return false;
1802 set_true_value(obj);
1803
Steve Block6ded16b2010-05-10 14:33:55 +01001804 obj = CreateOddball("false", Smi::FromInt(0));
Steve Blocka7e24c12009-10-30 11:49:00 +00001805 if (obj->IsFailure()) return false;
1806 set_false_value(obj);
1807
Steve Block6ded16b2010-05-10 14:33:55 +01001808 obj = CreateOddball("hole", Smi::FromInt(-1));
Steve Blocka7e24c12009-10-30 11:49:00 +00001809 if (obj->IsFailure()) return false;
1810 set_the_hole_value(obj);
1811
Steve Block6ded16b2010-05-10 14:33:55 +01001812 obj = CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
Steve Blocka7e24c12009-10-30 11:49:00 +00001813 if (obj->IsFailure()) return false;
1814 set_no_interceptor_result_sentinel(obj);
1815
Steve Block6ded16b2010-05-10 14:33:55 +01001816 obj = CreateOddball("termination_exception", Smi::FromInt(-3));
Steve Blocka7e24c12009-10-30 11:49:00 +00001817 if (obj->IsFailure()) return false;
1818 set_termination_exception(obj);
1819
1820 // Allocate the empty string.
1821 obj = AllocateRawAsciiString(0, TENURED);
1822 if (obj->IsFailure()) return false;
1823 set_empty_string(String::cast(obj));
1824
1825 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
1826 obj = LookupAsciiSymbol(constant_symbol_table[i].contents);
1827 if (obj->IsFailure()) return false;
1828 roots_[constant_symbol_table[i].index] = String::cast(obj);
1829 }
1830
1831 // Allocate the hidden symbol which is used to identify the hidden properties
1832 // in JSObjects. The hash code has a special value so that it will not match
1833 // the empty string when searching for the property. It cannot be part of the
1834 // loop above because it needs to be allocated manually with the special
1835 // hash code in place. The hash code for the hidden_symbol is zero to ensure
1836 // that it will always be at the first entry in property descriptors.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001837 obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
Steve Blocka7e24c12009-10-30 11:49:00 +00001838 if (obj->IsFailure()) return false;
1839 hidden_symbol_ = String::cast(obj);
1840
1841 // Allocate the proxy for __proto__.
1842 obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
1843 if (obj->IsFailure()) return false;
1844 set_prototype_accessors(Proxy::cast(obj));
1845
1846 // Allocate the code_stubs dictionary. The initial size is set to avoid
1847 // expanding the dictionary during bootstrapping.
1848 obj = NumberDictionary::Allocate(128);
1849 if (obj->IsFailure()) return false;
1850 set_code_stubs(NumberDictionary::cast(obj));
1851
1852 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
1853 // is set to avoid expanding the dictionary during bootstrapping.
1854 obj = NumberDictionary::Allocate(64);
1855 if (obj->IsFailure()) return false;
1856 set_non_monomorphic_cache(NumberDictionary::cast(obj));
1857
Kristian Monsen25f61362010-05-21 11:50:48 +01001858 set_instanceof_cache_function(Smi::FromInt(0));
1859 set_instanceof_cache_map(Smi::FromInt(0));
1860 set_instanceof_cache_answer(Smi::FromInt(0));
1861
Steve Blocka7e24c12009-10-30 11:49:00 +00001862 CreateFixedStubs();
1863
Leon Clarkee46be812010-01-19 14:06:41 +00001864 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001865
Steve Block6ded16b2010-05-10 14:33:55 +01001866 // Allocate cache for single character ASCII strings.
1867 obj = AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
Steve Blocka7e24c12009-10-30 11:49:00 +00001868 if (obj->IsFailure()) return false;
1869 set_single_character_string_cache(FixedArray::cast(obj));
1870
1871 // Allocate cache for external strings pointing to native source code.
1872 obj = AllocateFixedArray(Natives::GetBuiltinsCount());
1873 if (obj->IsFailure()) return false;
1874 set_natives_source_cache(FixedArray::cast(obj));
1875
1876 // Handling of script id generation is in Factory::NewScript.
1877 set_last_script_id(undefined_value());
1878
1879 // Initialize keyed lookup cache.
1880 KeyedLookupCache::Clear();
1881
1882 // Initialize context slot cache.
1883 ContextSlotCache::Clear();
1884
1885 // Initialize descriptor cache.
1886 DescriptorLookupCache::Clear();
1887
1888 // Initialize compilation cache.
1889 CompilationCache::Clear();
1890
1891 return true;
1892}
1893
1894
Leon Clarkee46be812010-01-19 14:06:41 +00001895Object* Heap::InitializeNumberStringCache() {
1896 // Compute the size of the number string cache based on the max heap size.
1897 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
1898 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
1899 int number_string_cache_size = max_semispace_size_ / 512;
1900 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
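  // E.g. with the 8 MB max semispace: 8 MB / 512 = 16K entries, the upper end
  // of the [32, 16K] clamp. The FixedArray allocated below is twice that long
  // because each entry is a (number, string) pair.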
Steve Block6ded16b2010-05-10 14:33:55 +01001901 Object* obj = AllocateFixedArray(number_string_cache_size * 2, TENURED);
Leon Clarkee46be812010-01-19 14:06:41 +00001902 if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
1903 return obj;
1904}
1905
1906
1907void Heap::FlushNumberStringCache() {
1908 // Flush the number to string cache.
1909 int len = number_string_cache()->length();
1910 for (int i = 0; i < len; i++) {
1911 number_string_cache()->set_undefined(i);
1912 }
1913}
1914
1915
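// Hash a double by XOR-ing the upper and lower 32 bits of its IEEE 754 bit
// pattern, so values that differ in only one half still hash differently.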
Steve Blocka7e24c12009-10-30 11:49:00 +00001916static inline int double_get_hash(double d) {
1917 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00001918 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00001919}
1920
1921
1922static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00001923 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00001924}
1925
1926
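// The number string cache is a flat FixedArray of (key, value) pairs: the
// number is stored at index hash * 2 and the cached string at hash * 2 + 1.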
Steve Blocka7e24c12009-10-30 11:49:00 +00001927Object* Heap::GetNumberStringCache(Object* number) {
1928 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00001929 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00001930 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00001931 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001932 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00001933 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001934 }
1935 Object* key = number_string_cache()->get(hash * 2);
1936 if (key == number) {
1937 return String::cast(number_string_cache()->get(hash * 2 + 1));
1938 } else if (key->IsHeapNumber() &&
1939 number->IsHeapNumber() &&
1940 key->Number() == number->Number()) {
1941 return String::cast(number_string_cache()->get(hash * 2 + 1));
1942 }
1943 return undefined_value();
1944}
1945
1946
1947void Heap::SetNumberStringCache(Object* number, String* string) {
1948 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00001949 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00001950 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00001951 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00001952 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00001953 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00001954 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00001955 number_string_cache()->set(hash * 2, number);
1956 }
1957 number_string_cache()->set(hash * 2 + 1, string);
1958}
1959
1960
Steve Block6ded16b2010-05-10 14:33:55 +01001961Object* Heap::NumberToString(Object* number, bool check_number_string_cache) {
Andrei Popescu402d9372010-02-26 13:31:12 +00001962 Counters::number_to_string_runtime.Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01001963 if (check_number_string_cache) {
1964 Object* cached = GetNumberStringCache(number);
1965 if (cached != undefined_value()) {
1966 return cached;
1967 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001968 }
1969
1970 char arr[100];
1971 Vector<char> buffer(arr, ARRAY_SIZE(arr));
1972 const char* str;
1973 if (number->IsSmi()) {
1974 int num = Smi::cast(number)->value();
1975 str = IntToCString(num, buffer);
1976 } else {
1977 double num = HeapNumber::cast(number)->value();
1978 str = DoubleToCString(num, buffer);
1979 }
1980 Object* result = AllocateStringFromAscii(CStrVector(str));
1981
1982 if (!result->IsFailure()) {
1983 SetNumberStringCache(number, String::cast(result));
1984 }
1985 return result;
1986}
1987
1988
Steve Block3ce2e202009-11-05 08:53:23 +00001989Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
1990 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
1991}
1992
1993
1994Heap::RootListIndex Heap::RootIndexForExternalArrayType(
1995 ExternalArrayType array_type) {
1996 switch (array_type) {
1997 case kExternalByteArray:
1998 return kExternalByteArrayMapRootIndex;
1999 case kExternalUnsignedByteArray:
2000 return kExternalUnsignedByteArrayMapRootIndex;
2001 case kExternalShortArray:
2002 return kExternalShortArrayMapRootIndex;
2003 case kExternalUnsignedShortArray:
2004 return kExternalUnsignedShortArrayMapRootIndex;
2005 case kExternalIntArray:
2006 return kExternalIntArrayMapRootIndex;
2007 case kExternalUnsignedIntArray:
2008 return kExternalUnsignedIntArrayMapRootIndex;
2009 case kExternalFloatArray:
2010 return kExternalFloatArrayMapRootIndex;
2011 default:
2012 UNREACHABLE();
2013 return kUndefinedValueRootIndex;
2014 }
2015}
2016
2017
Steve Blocka7e24c12009-10-30 11:49:00 +00002018Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002019 // We need to distinguish the minus zero value and this cannot be
2020 // done after conversion to int. Doing this by comparing bit
2021 // patterns is faster than using fpclassify() et al.
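  // (+0.0 and -0.0 compare equal with ==, but only -0.0 has the IEEE 754 sign
  // bit set, so their bit patterns differ.)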
2022 static const DoubleRepresentation minus_zero(-0.0);
2023
2024 DoubleRepresentation rep(value);
2025 if (rep.bits == minus_zero.bits) {
2026 return AllocateHeapNumber(-0.0, pretenure);
2027 }
2028
2029 int int_value = FastD2I(value);
2030 if (value == int_value && Smi::IsValid(int_value)) {
2031 return Smi::FromInt(int_value);
2032 }
2033
2034 // Materialize the value in the heap.
2035 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002036}
2037
2038
2039Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
2040 // Statically ensure that it is safe to allocate proxies in paged spaces.
2041 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2042 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2043 Object* result = Allocate(proxy_map(), space);
2044 if (result->IsFailure()) return result;
2045
2046 Proxy::cast(result)->set_proxy(proxy);
2047 return result;
2048}
2049
2050
2051Object* Heap::AllocateSharedFunctionInfo(Object* name) {
2052 Object* result = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2053 if (result->IsFailure()) return result;
2054
2055 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2056 share->set_name(name);
2057 Code* illegal = Builtins::builtin(Builtins::Illegal);
2058 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002059 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Blocka7e24c12009-10-30 11:49:00 +00002060 Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
2061 share->set_construct_stub(construct_stub);
2062 share->set_expected_nof_properties(0);
2063 share->set_length(0);
2064 share->set_formal_parameter_count(0);
2065 share->set_instance_class_name(Object_symbol());
2066 share->set_function_data(undefined_value());
2067 share->set_script(undefined_value());
2068 share->set_start_position_and_type(0);
2069 share->set_debug_info(undefined_value());
2070 share->set_inferred_name(empty_string());
2071 share->set_compiler_hints(0);
2072 share->set_this_property_assignments_count(0);
2073 share->set_this_property_assignments(undefined_value());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002074 share->set_num_literals(0);
2075 share->set_end_position(0);
2076 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002077 return result;
2078}
2079
2080
Steve Blockd0582a62009-12-15 09:54:21 +00002081// Returns true for a character in a range. Both limits are inclusive.
2082static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2083   // This makes use of the unsigned wraparound.
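  // If character < from, the subtraction wraps around to a large unsigned
  // value that is guaranteed to be greater than to - from.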
2084 return character - from <= to - from;
2085}
2086
2087
2088static inline Object* MakeOrFindTwoCharacterString(uint32_t c1, uint32_t c2) {
2089 String* symbol;
2090 // Numeric strings have a different hash algorithm not known by
2091 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
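  // (Strings that are valid array indices, such as "42", are hashed as array
  // indices, so a plain two-character hash probe would not find them.)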
2092 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2093 Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2094 return symbol;
2095 // Now we know the length is 2, we might as well make use of that fact
2096 // when building the new string.
2097 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2098 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
2099 Object* result = Heap::AllocateRawAsciiString(2);
2100 if (result->IsFailure()) return result;
2101 char* dest = SeqAsciiString::cast(result)->GetChars();
2102 dest[0] = c1;
2103 dest[1] = c2;
2104 return result;
2105 } else {
2106 Object* result = Heap::AllocateRawTwoByteString(2);
2107 if (result->IsFailure()) return result;
2108 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2109 dest[0] = c1;
2110 dest[1] = c2;
2111 return result;
2112 }
2113}
2114
2115
Steve Blocka7e24c12009-10-30 11:49:00 +00002116Object* Heap::AllocateConsString(String* first, String* second) {
2117 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002118 if (first_length == 0) {
2119 return second;
2120 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002121
2122 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002123 if (second_length == 0) {
2124 return first;
2125 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002126
2127 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002128
2129 // Optimization for 2-byte strings often used as keys in a decompression
2130 // dictionary. Check whether we already have the string in the symbol
2131   // table to prevent creation of many unnecessary strings.
2132 if (length == 2) {
2133 unsigned c1 = first->Get(0);
2134 unsigned c2 = second->Get(0);
2135 return MakeOrFindTwoCharacterString(c1, c2);
2136 }
2137
Steve Block6ded16b2010-05-10 14:33:55 +01002138 bool first_is_ascii = first->IsAsciiRepresentation();
2139 bool second_is_ascii = second->IsAsciiRepresentation();
2140 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002141
2142 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002143 // of the new cons string is too large.
2144 if (length > String::kMaxLength || length < 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002145 Top::context()->mark_out_of_memory();
2146 return Failure::OutOfMemoryException();
2147 }
2148
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002149 bool is_ascii_data_in_two_byte_string = false;
2150 if (!is_ascii) {
2151 // At least one of the strings uses two-byte representation so we
2152 // can't use the fast case code for short ascii strings below, but
2153 // we can try to save memory if all chars actually fit in ascii.
2154 is_ascii_data_in_two_byte_string =
2155 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2156 if (is_ascii_data_in_two_byte_string) {
2157 Counters::string_add_runtime_ext_to_ascii.Increment();
2158 }
2159 }
2160
Steve Blocka7e24c12009-10-30 11:49:00 +00002161 // If the resulting string is small make a flat string.
2162 if (length < String::kMinNonFlatLength) {
2163 ASSERT(first->IsFlat());
2164 ASSERT(second->IsFlat());
2165 if (is_ascii) {
2166 Object* result = AllocateRawAsciiString(length);
2167 if (result->IsFailure()) return result;
2168 // Copy the characters into the new object.
2169 char* dest = SeqAsciiString::cast(result)->GetChars();
2170 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002171 const char* src;
2172 if (first->IsExternalString()) {
2173 src = ExternalAsciiString::cast(first)->resource()->data();
2174 } else {
2175 src = SeqAsciiString::cast(first)->GetChars();
2176 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002177 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2178 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002179 if (second->IsExternalString()) {
2180 src = ExternalAsciiString::cast(second)->resource()->data();
2181 } else {
2182 src = SeqAsciiString::cast(second)->GetChars();
2183 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002184 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2185 return result;
2186 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002187 if (is_ascii_data_in_two_byte_string) {
Steve Block6ded16b2010-05-10 14:33:55 +01002188 Object* result = AllocateRawAsciiString(length);
2189 if (result->IsFailure()) return result;
2190 // Copy the characters into the new object.
2191 char* dest = SeqAsciiString::cast(result)->GetChars();
2192 String::WriteToFlat(first, dest, 0, first_length);
2193 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block6ded16b2010-05-10 14:33:55 +01002194 return result;
2195 }
2196
Steve Blocka7e24c12009-10-30 11:49:00 +00002197 Object* result = AllocateRawTwoByteString(length);
2198 if (result->IsFailure()) return result;
2199 // Copy the characters into the new object.
2200 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2201 String::WriteToFlat(first, dest, 0, first_length);
2202 String::WriteToFlat(second, dest + first_length, 0, second_length);
2203 return result;
2204 }
2205 }
2206
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002207 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2208 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002209
Leon Clarkee46be812010-01-19 14:06:41 +00002210 Object* result = Allocate(map, NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002211 if (result->IsFailure()) return result;
Leon Clarke4515c472010-02-03 11:58:03 +00002212
2213 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002214 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002215 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002216 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002217 cons_string->set_hash_field(String::kEmptyHashField);
2218 cons_string->set_first(first, mode);
2219 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002220 return result;
2221}
2222
2223
2224Object* Heap::AllocateSubString(String* buffer,
2225 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002226 int end,
2227 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002228 int length = end - start;
2229
2230 if (length == 1) {
2231 return Heap::LookupSingleCharacterStringFromCode(
2232 buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002233 } else if (length == 2) {
2234 // Optimization for 2-byte strings often used as keys in a decompression
2235 // dictionary. Check whether we already have the string in the symbol
2236   // table to prevent creation of many unnecessary strings.
2237 unsigned c1 = buffer->Get(start);
2238 unsigned c2 = buffer->Get(start + 1);
2239 return MakeOrFindTwoCharacterString(c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002240 }
2241
2242 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002243 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002244
2245 Object* result = buffer->IsAsciiRepresentation()
Steve Block6ded16b2010-05-10 14:33:55 +01002246       ? AllocateRawAsciiString(length, pretenure)
2247 : AllocateRawTwoByteString(length, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002248 if (result->IsFailure()) return result;
Steve Blockd0582a62009-12-15 09:54:21 +00002249 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002250 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002251 if (buffer->IsAsciiRepresentation()) {
2252 ASSERT(string_result->IsAsciiRepresentation());
2253 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2254 String::WriteToFlat(buffer, dest, start, end);
2255 } else {
2256 ASSERT(string_result->IsTwoByteRepresentation());
2257 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2258 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002259 }
Steve Blockd0582a62009-12-15 09:54:21 +00002260
Steve Blocka7e24c12009-10-30 11:49:00 +00002261 return result;
2262}
2263
2264
2265Object* Heap::AllocateExternalStringFromAscii(
2266 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002267 size_t length = resource->length();
2268 if (length > static_cast<size_t>(String::kMaxLength)) {
2269 Top::context()->mark_out_of_memory();
2270 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002271 }
2272
Steve Blockd0582a62009-12-15 09:54:21 +00002273 Map* map = external_ascii_string_map();
Leon Clarkee46be812010-01-19 14:06:41 +00002274 Object* result = Allocate(map, NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002275 if (result->IsFailure()) return result;
2276
2277 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002278 external_string->set_length(static_cast<int>(length));
2279 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002280 external_string->set_resource(resource);
2281
2282 return result;
2283}
2284
2285
2286Object* Heap::AllocateExternalStringFromTwoByte(
2287 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002288 size_t length = resource->length();
2289 if (length > static_cast<size_t>(String::kMaxLength)) {
2290 Top::context()->mark_out_of_memory();
2291 return Failure::OutOfMemoryException();
2292 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002293
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002294 // For small strings we check whether the resource contains only
2295 // ascii characters. If yes, we use a different string map.
2296 bool is_ascii = true;
2297 if (length >= static_cast<size_t>(String::kMinNonFlatLength)) {
2298 is_ascii = false;
2299 } else {
2300 const uc16* data = resource->data();
2301 for (size_t i = 0; i < length; i++) {
2302 if (data[i] > String::kMaxAsciiCharCode) {
2303 is_ascii = false;
2304 break;
2305 }
2306 }
2307 }
2308
2309 Map* map = is_ascii ?
2310 Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
Leon Clarkee46be812010-01-19 14:06:41 +00002311 Object* result = Allocate(map, NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002312 if (result->IsFailure()) return result;
2313
2314 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002315 external_string->set_length(static_cast<int>(length));
2316 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002317 external_string->set_resource(resource);
2318
2319 return result;
2320}
2321
2322
2323Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
2324 if (code <= String::kMaxAsciiCharCode) {
2325 Object* value = Heap::single_character_string_cache()->get(code);
2326 if (value != Heap::undefined_value()) return value;
2327
2328 char buffer[1];
2329 buffer[0] = static_cast<char>(code);
2330 Object* result = LookupSymbol(Vector<const char>(buffer, 1));
2331
2332 if (result->IsFailure()) return result;
2333 Heap::single_character_string_cache()->set(code, result);
2334 return result;
2335 }
2336
2337 Object* result = Heap::AllocateRawTwoByteString(1);
2338 if (result->IsFailure()) return result;
2339 String* answer = String::cast(result);
2340 answer->Set(0, code);
2341 return answer;
2342}
2343
2344
2345Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002346 if (length < 0 || length > ByteArray::kMaxLength) {
2347 return Failure::OutOfMemoryException();
2348 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002349 if (pretenure == NOT_TENURED) {
2350 return AllocateByteArray(length);
2351 }
2352 int size = ByteArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00002353 Object* result = (size <= MaxObjectSizeInPagedSpace())
2354 ? old_data_space_->AllocateRaw(size)
2355 : lo_space_->AllocateRaw(size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002356 if (result->IsFailure()) return result;
2357
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002358 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2359 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002360 return result;
2361}
2362
2363
2364Object* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002365 if (length < 0 || length > ByteArray::kMaxLength) {
2366 return Failure::OutOfMemoryException();
2367 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002368 int size = ByteArray::SizeFor(length);
2369 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002370 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00002371 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002372 if (result->IsFailure()) return result;
2373
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002374 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2375 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002376 return result;
2377}
2378
2379
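// Filler objects give unused or freed regions a valid map (and, for fillers
// larger than two words, a length) so that heap iteration can skip over them.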
2380void Heap::CreateFillerObjectAt(Address addr, int size) {
2381 if (size == 0) return;
2382 HeapObject* filler = HeapObject::FromAddress(addr);
2383 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002384 filler->set_map(one_pointer_filler_map());
2385 } else if (size == 2 * kPointerSize) {
2386 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002387 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002388 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002389 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2390 }
2391}
2392
2393
2394Object* Heap::AllocatePixelArray(int length,
2395 uint8_t* external_pointer,
2396 PretenureFlag pretenure) {
2397 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00002398 Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002399 if (result->IsFailure()) return result;
2400
2401 reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
2402 reinterpret_cast<PixelArray*>(result)->set_length(length);
2403 reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
2404
2405 return result;
2406}
2407
2408
Steve Block3ce2e202009-11-05 08:53:23 +00002409Object* Heap::AllocateExternalArray(int length,
2410 ExternalArrayType array_type,
2411 void* external_pointer,
2412 PretenureFlag pretenure) {
2413 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Steve Block3ce2e202009-11-05 08:53:23 +00002414 Object* result = AllocateRaw(ExternalArray::kAlignedSize,
2415 space,
2416 OLD_DATA_SPACE);
Steve Block3ce2e202009-11-05 08:53:23 +00002417 if (result->IsFailure()) return result;
2418
2419 reinterpret_cast<ExternalArray*>(result)->set_map(
2420 MapForExternalArrayType(array_type));
2421 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2422 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2423 external_pointer);
2424
2425 return result;
2426}
2427
2428
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002429 // FlushingStackVisitor traverses all the archived threads to see if
2430// there are activations on any of the stacks corresponding to the code.
2431class FlushingStackVisitor : public ThreadVisitor {
2432 public:
2433 explicit FlushingStackVisitor(Code* code) : found_(false), code_(code) {}
2434
2435 void VisitThread(ThreadLocalTop* top) {
2436 // If we already found the code in a previous traversed thread we return.
2437 if (found_) return;
2438
2439 for (StackFrameIterator it(top); !it.done(); it.Advance()) {
2440 if (code_->contains(it.frame()->pc())) {
2441 found_ = true;
2442 return;
2443 }
2444 }
2445 }
2446   bool FoundCode() { return found_; }
2447
2448 private:
2449 bool found_;
2450 Code* code_;
2451};
2452
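// Conservatively decides whether the compiled code of a function can be
// discarded and lazily recompiled later; every early return below keeps the
// code alive.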
2453
2454static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
2455 // The function must be compiled and have the source code available,
2456 // to be able to recompile it in case we need the function again.
2457 if (!(function_info->is_compiled() && function_info->HasSourceCode())) return;
2458
2459 // We never flush code for Api functions.
2460 if (function_info->IsApiFunction()) return;
2461
2462 // Only flush code for functions.
2463   if (function_info->code()->kind() != Code::FUNCTION) return;
2464
2465 // Function must be lazy compilable.
2466 if (!function_info->allows_lazy_compilation()) return;
2467
2468   // If this is a full script wrapped in a function we do not flush the code.
2469 if (function_info->is_toplevel()) return;
2470
2471 // If this function is in the compilation cache we do not flush the code.
2472 if (CompilationCache::HasFunction(function_info)) return;
2473
2474 // Make sure we are not referencing the code from the stack.
2475 for (StackFrameIterator it; !it.done(); it.Advance()) {
2476 if (function_info->code()->contains(it.frame()->pc())) return;
2477 }
2478 // Iterate the archived stacks in all threads to check if
2479 // the code is referenced.
2480 FlushingStackVisitor threadvisitor(function_info->code());
2481 ThreadManager::IterateArchivedThreads(&threadvisitor);
2482 if (threadvisitor.FoundCode()) return;
2483
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002484 // Compute the lazy compilable version of the code.
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002485 HandleScope scope;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002486 function_info->set_code(*ComputeLazyCompile(function_info->length()));
2487}
2488
2489
2490void Heap::FlushCode() {
2491#ifdef ENABLE_DEBUGGER_SUPPORT
2492 // Do not flush code if the debugger is loaded or there are breakpoints.
2493 if (Debug::IsLoaded() || Debug::has_break_points()) return;
2494#endif
2495 HeapObjectIterator it(old_pointer_space());
2496 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
2497 if (obj->IsJSFunction()) {
2498 JSFunction* jsfunction = JSFunction::cast(obj);
2499
2500 // The function must have a valid context and not be a builtin.
2501 if (jsfunction->unchecked_context()->IsContext() &&
2502 !jsfunction->IsBuiltin()) {
2503 FlushCodeForFunction(jsfunction->shared());
2504 }
2505 }
2506 }
2507}
2508
2509
Steve Blocka7e24c12009-10-30 11:49:00 +00002510Object* Heap::CreateCode(const CodeDesc& desc,
Steve Blocka7e24c12009-10-30 11:49:00 +00002511 Code::Flags flags,
2512 Handle<Object> self_reference) {
Leon Clarkeac952652010-07-15 11:15:24 +01002513 // Allocate ByteArray before the Code object, so that we do not risk
2514   // leaving an uninitialized Code object (and breaking the heap).
2515 Object* reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2516 if (reloc_info->IsFailure()) return reloc_info;
2517
Steve Blocka7e24c12009-10-30 11:49:00 +00002518 // Compute size
Leon Clarkeac952652010-07-15 11:15:24 +01002519 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002520 int obj_size = Code::SizeFor(body_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002521 ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
2522 Object* result;
2523 if (obj_size > MaxObjectSizeInPagedSpace()) {
2524 result = lo_space_->AllocateRawCode(obj_size);
2525 } else {
2526 result = code_space_->AllocateRaw(obj_size);
2527 }
2528
2529 if (result->IsFailure()) return result;
2530
2531 // Initialize the object
2532 HeapObject::cast(result)->set_map(code_map());
2533 Code* code = Code::cast(result);
2534 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2535 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002536 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002537 code->set_flags(flags);
2538 // Allow self references to created code object by patching the handle to
2539 // point to the newly allocated Code object.
2540 if (!self_reference.is_null()) {
2541 *(self_reference.location()) = code;
2542 }
2543 // Migrate generated code.
2544 // The generated code can contain Object** values (typically from handles)
2545 // that are dereferenced during the copy to point directly to the actual heap
2546 // objects. These pointers can include references to the code object itself,
2547 // through the self_reference parameter.
2548 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002549
2550#ifdef DEBUG
2551 code->Verify();
2552#endif
2553 return code;
2554}
2555
2556
2557Object* Heap::CopyCode(Code* code) {
2558 // Allocate an object the same size as the code object.
2559 int obj_size = code->Size();
2560 Object* result;
2561 if (obj_size > MaxObjectSizeInPagedSpace()) {
2562 result = lo_space_->AllocateRawCode(obj_size);
2563 } else {
2564 result = code_space_->AllocateRaw(obj_size);
2565 }
2566
2567 if (result->IsFailure()) return result;
2568
2569 // Copy code object.
2570 Address old_addr = code->address();
2571 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002572 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002573 // Relocate the copy.
2574 Code* new_code = Code::cast(result);
2575 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2576 new_code->Relocate(new_addr - old_addr);
2577 return new_code;
2578}
2579
2580
Steve Block6ded16b2010-05-10 14:33:55 +01002581Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002582 // Allocate ByteArray before the Code object, so that we do not risk
2583   // leaving an uninitialized Code object (and breaking the heap).
2584 Object* reloc_info_array = AllocateByteArray(reloc_info.length(), TENURED);
2585 if (reloc_info_array->IsFailure()) return reloc_info_array;
2586
2587 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002588
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002589 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002590
2591 Address old_addr = code->address();
2592
2593 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002594 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002595
2596 Object* result;
2597 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
2598 result = lo_space_->AllocateRawCode(new_obj_size);
2599 } else {
2600 result = code_space_->AllocateRaw(new_obj_size);
2601 }
2602
2603 if (result->IsFailure()) return result;
2604
2605 // Copy code object.
2606 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2607
2608 // Copy header and instructions.
2609 memcpy(new_addr, old_addr, relocation_offset);
2610
Steve Block6ded16b2010-05-10 14:33:55 +01002611 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002612 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002613
Leon Clarkeac952652010-07-15 11:15:24 +01002614 // Copy patched rinfo.
2615 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002616
2617 // Relocate the copy.
2618 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2619 new_code->Relocate(new_addr - old_addr);
2620
2621#ifdef DEBUG
2622 code->Verify();
2623#endif
2624 return new_code;
2625}
2626
2627
Steve Blocka7e24c12009-10-30 11:49:00 +00002628Object* Heap::Allocate(Map* map, AllocationSpace space) {
2629 ASSERT(gc_state_ == NOT_IN_GC);
2630 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002631 // If allocation failures are disallowed, we may allocate in a different
2632 // space when new space is full and the object is not a large object.
2633 AllocationSpace retry_space =
2634 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
2635 Object* result =
2636 AllocateRaw(map->instance_size(), space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00002637 if (result->IsFailure()) return result;
2638 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002639#ifdef ENABLE_LOGGING_AND_PROFILING
2640 ProducerHeapProfile::RecordJSObjectAllocation(result);
2641#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002642 return result;
2643}
2644
2645
2646Object* Heap::InitializeFunction(JSFunction* function,
2647 SharedFunctionInfo* shared,
2648 Object* prototype) {
2649 ASSERT(!prototype->IsMap());
2650 function->initialize_properties();
2651 function->initialize_elements();
2652 function->set_shared(shared);
2653 function->set_prototype_or_initial_map(prototype);
2654 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002655 function->set_literals(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002656 return function;
2657}
2658
2659
2660Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
2661 // Allocate the prototype. Make sure to use the object function
2662 // from the function's context, since the function can be from a
2663 // different context.
2664 JSFunction* object_function =
2665 function->context()->global_context()->object_function();
2666 Object* prototype = AllocateJSObject(object_function);
2667 if (prototype->IsFailure()) return prototype;
2668 // When creating the prototype for the function we must set its
2669 // constructor to the function.
2670 Object* result =
2671 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
2672 function,
2673 DONT_ENUM);
2674 if (result->IsFailure()) return result;
2675 return prototype;
2676}
2677
2678
2679Object* Heap::AllocateFunction(Map* function_map,
2680 SharedFunctionInfo* shared,
Leon Clarkee46be812010-01-19 14:06:41 +00002681 Object* prototype,
2682 PretenureFlag pretenure) {
2683 AllocationSpace space =
2684 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2685 Object* result = Allocate(function_map, space);
Steve Blocka7e24c12009-10-30 11:49:00 +00002686 if (result->IsFailure()) return result;
2687 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2688}
2689
2690
2691Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
2692 // To get fast allocation and map sharing for arguments objects we
2693 // allocate them based on an arguments boilerplate.
2694
2695 // This calls Copy directly rather than using Heap::AllocateRaw so we
2696 // duplicate the check here.
2697 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2698
2699 JSObject* boilerplate =
2700 Top::context()->global_context()->arguments_boilerplate();
2701
Leon Clarkee46be812010-01-19 14:06:41 +00002702 // Check that the size of the boilerplate matches our
2703 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2704 // on the size being a known constant.
2705 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2706
2707 // Do the allocation.
2708 Object* result =
2709 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00002710 if (result->IsFailure()) return result;
2711
2712 // Copy the content. The arguments boilerplate doesn't have any
2713 // fields that point to new space so it's safe to skip the write
2714 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002715 CopyBlock(HeapObject::cast(result)->address(),
2716 boilerplate->address(),
Leon Clarkee46be812010-01-19 14:06:41 +00002717 kArgumentsObjectSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002718
2719 // Set the two properties.
2720 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2721 callee);
2722 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2723 Smi::FromInt(length),
2724 SKIP_WRITE_BARRIER);
2725
2726 // Check the state of the object
2727 ASSERT(JSObject::cast(result)->HasFastProperties());
2728 ASSERT(JSObject::cast(result)->HasFastElements());
2729
2730 return result;
2731}
2732
2733
2734Object* Heap::AllocateInitialMap(JSFunction* fun) {
2735 ASSERT(!fun->has_initial_map());
2736
2737 // First create a new map with the size and number of in-object properties
2738 // suggested by the function.
2739 int instance_size = fun->shared()->CalculateInstanceSize();
2740 int in_object_properties = fun->shared()->CalculateInObjectProperties();
2741 Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
2742 if (map_obj->IsFailure()) return map_obj;
2743
2744 // Fetch or allocate prototype.
2745 Object* prototype;
2746 if (fun->has_instance_prototype()) {
2747 prototype = fun->instance_prototype();
2748 } else {
2749 prototype = AllocateFunctionPrototype(fun);
2750 if (prototype->IsFailure()) return prototype;
2751 }
2752 Map* map = Map::cast(map_obj);
2753 map->set_inobject_properties(in_object_properties);
2754 map->set_unused_property_fields(in_object_properties);
2755 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01002756 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00002757
Andrei Popescu402d9372010-02-26 13:31:12 +00002758 // If the function has only simple this property assignments add
2759 // field descriptors for these to the initial map as the object
2760 // cannot be constructed without having these properties. Guard by
2761 // the inline_new flag so we only change the map if we generate a
2762 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00002763 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00002764 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002765 int count = fun->shared()->this_property_assignments_count();
2766 if (count > in_object_properties) {
2767 count = in_object_properties;
2768 }
2769 Object* descriptors_obj = DescriptorArray::Allocate(count);
2770 if (descriptors_obj->IsFailure()) return descriptors_obj;
2771 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
2772 for (int i = 0; i < count; i++) {
2773 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
2774 ASSERT(name->IsSymbol());
2775 FieldDescriptor field(name, i, NONE);
Leon Clarke4515c472010-02-03 11:58:03 +00002776 field.SetEnumerationIndex(i);
Steve Blocka7e24c12009-10-30 11:49:00 +00002777 descriptors->Set(i, &field);
2778 }
Leon Clarke4515c472010-02-03 11:58:03 +00002779 descriptors->SetNextEnumerationIndex(count);
Steve Blocka7e24c12009-10-30 11:49:00 +00002780 descriptors->Sort();
2781 map->set_instance_descriptors(descriptors);
2782 map->set_pre_allocated_property_fields(count);
2783 map->set_unused_property_fields(in_object_properties - count);
2784 }
2785 return map;
2786}
2787
2788
2789void Heap::InitializeJSObjectFromMap(JSObject* obj,
2790 FixedArray* properties,
2791 Map* map) {
2792 obj->set_properties(properties);
2793 obj->initialize_elements();
2794 // TODO(1240798): Initialize the object's body using valid initial values
2795 // according to the object's initial map. For example, if the map's
2796 // instance type is JS_ARRAY_TYPE, the length field should be initialized
2797 // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
2798 // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
2799 // verification code has to cope with (temporarily) invalid objects. See
2800   // for example, JSArray::JSArrayVerify.
2801 obj->InitializeBody(map->instance_size());
2802}
2803
2804
2805Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
2806 // JSFunctions should be allocated using AllocateFunction to be
2807 // properly initialized.
2808 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
2809
Steve Block8defd9f2010-07-08 12:39:36 +01002810 // Both types of global objects should be allocated using
2811 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00002812 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
2813 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
2814
2815 // Allocate the backing storage for the properties.
2816 int prop_size =
2817 map->pre_allocated_property_fields() +
2818 map->unused_property_fields() -
2819 map->inobject_properties();
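  // Worked example (hypothetical values): a map with 3 pre-allocated
  // property fields, 5 unused property fields and 4 in-object properties
  // needs a backing store of 3 + 5 - 4 = 4 out-of-object property slots.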
2820 ASSERT(prop_size >= 0);
2821 Object* properties = AllocateFixedArray(prop_size, pretenure);
2822 if (properties->IsFailure()) return properties;
2823
2824 // Allocate the JSObject.
2825 AllocationSpace space =
2826 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2827 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
2828 Object* obj = Allocate(map, space);
2829 if (obj->IsFailure()) return obj;
2830
2831 // Initialize the JSObject.
2832 InitializeJSObjectFromMap(JSObject::cast(obj),
2833 FixedArray::cast(properties),
2834 map);
Steve Block8defd9f2010-07-08 12:39:36 +01002835 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00002836 return obj;
2837}
2838
2839
2840Object* Heap::AllocateJSObject(JSFunction* constructor,
2841 PretenureFlag pretenure) {
2842 // Allocate the initial map if absent.
2843 if (!constructor->has_initial_map()) {
2844 Object* initial_map = AllocateInitialMap(constructor);
2845 if (initial_map->IsFailure()) return initial_map;
2846 constructor->set_initial_map(Map::cast(initial_map));
2847 Map::cast(initial_map)->set_constructor(constructor);
2848 }
2849 // Allocate the object based on the constructor's initial map.
2850 Object* result =
2851 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
2852 // Make sure the result is NOT a global object if allocation succeeded.
2853 ASSERT(result->IsFailure() || !result->IsGlobalObject());
2854 return result;
2855}
2856
2857
2858Object* Heap::AllocateGlobalObject(JSFunction* constructor) {
2859 ASSERT(constructor->has_initial_map());
2860 Map* map = constructor->initial_map();
2861
2862 // Make sure no field properties are described in the initial map.
2863 // This guarantees that normalizing the properties does not
2864 // require changing property values to JSGlobalPropertyCells.
2865 ASSERT(map->NextFreePropertyIndex() == 0);
2866
2867 // Make sure we don't have any pre-allocated property slots in the
2868 // global objects. They would be unused once we normalize the object.
2869 ASSERT(map->unused_property_fields() == 0);
2870 ASSERT(map->inobject_properties() == 0);
2871
2872 // Initial size of the backing store to avoid resizing the storage during
2873 // bootstrapping. The size differs between the JS global object and the
2874 // builtins object.
2875 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
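  // Sizing sketch (hypothetical count): if the template describes, say,
  // 10 accessors on the JS global object, the dictionary below is
  // allocated with 10 * 2 + 64 = 84 entries, leaving enough headroom
  // that bootstrapping does not have to grow the backing store.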
2876
2877 // Allocate a dictionary object for backing storage.
2878 Object* obj =
2879 StringDictionary::Allocate(
2880 map->NumberOfDescribedProperties() * 2 + initial_size);
2881 if (obj->IsFailure()) return obj;
2882 StringDictionary* dictionary = StringDictionary::cast(obj);
2883
2884 // The global object might be created from an object template with accessors.
2885 // Fill these accessors into the dictionary.
2886 DescriptorArray* descs = map->instance_descriptors();
2887 for (int i = 0; i < descs->number_of_descriptors(); i++) {
2888 PropertyDetails details = descs->GetDetails(i);
2889 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
2890 PropertyDetails d =
2891 PropertyDetails(details.attributes(), CALLBACKS, details.index());
2892 Object* value = descs->GetCallbacksObject(i);
2893 value = Heap::AllocateJSGlobalPropertyCell(value);
2894 if (value->IsFailure()) return value;
2895
2896 Object* result = dictionary->Add(descs->GetKey(i), value, d);
2897 if (result->IsFailure()) return result;
2898 dictionary = StringDictionary::cast(result);
2899 }
2900
2901 // Allocate the global object and initialize it with the backing store.
2902 obj = Allocate(map, OLD_POINTER_SPACE);
2903 if (obj->IsFailure()) return obj;
2904 JSObject* global = JSObject::cast(obj);
2905 InitializeJSObjectFromMap(global, dictionary, map);
2906
2907 // Create a new map for the global object.
2908 obj = map->CopyDropDescriptors();
2909 if (obj->IsFailure()) return obj;
2910 Map* new_map = Map::cast(obj);
2911
2912 // Set up the global object as a normalized object.
2913 global->set_map(new_map);
2914 global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
2915 global->set_properties(dictionary);
2916
2917 // Make sure result is a global object with properties in dictionary.
2918 ASSERT(global->IsGlobalObject());
2919 ASSERT(!global->HasFastProperties());
2920 return global;
2921}
2922
2923
2924Object* Heap::CopyJSObject(JSObject* source) {
2925 // Never used to copy functions. If functions need to be copied we
2926 // have to be careful to clear the literals array.
2927 ASSERT(!source->IsJSFunction());
2928
2929 // Make the clone.
2930 Map* map = source->map();
2931 int object_size = map->instance_size();
2932 Object* clone;
2933
2934 // If we're forced to always allocate, we use the general allocation
2935 // functions which may leave us with an object in old space.
2936 if (always_allocate()) {
2937 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
2938 if (clone->IsFailure()) return clone;
2939 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002940 CopyBlock(clone_address,
2941 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00002942 object_size);
2943 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01002944 RecordWrites(clone_address,
2945 JSObject::kHeaderSize,
2946 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002947 } else {
2948 clone = new_space_.AllocateRaw(object_size);
2949 if (clone->IsFailure()) return clone;
2950 ASSERT(Heap::InNewSpace(clone));
2951 // Since we know the clone is allocated in new space, we can copy
2952 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002953 CopyBlock(HeapObject::cast(clone)->address(),
2954 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00002955 object_size);
2956 }
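  // Barrier sketch (hypothetical 32-bit sizes): in the old-space branch,
  // an object_size of 64 with a 12-byte JSObject header and 4-byte
  // pointers makes RecordWrites cover (64 - 12) / 4 = 13 pointer fields;
  // the new-space branch needs no write barrier at all.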
2957
2958 FixedArray* elements = FixedArray::cast(source->elements());
2959 FixedArray* properties = FixedArray::cast(source->properties());
2960 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01002961 if (elements->length() > 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002962 Object* elem = CopyFixedArray(elements);
2963 if (elem->IsFailure()) return elem;
2964 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
2965 }
2966 // Update properties if necessary.
2967 if (properties->length() > 0) {
2968 Object* prop = CopyFixedArray(properties);
2969 if (prop->IsFailure()) return prop;
2970 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
2971 }
2972 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00002973#ifdef ENABLE_LOGGING_AND_PROFILING
2974 ProducerHeapProfile::RecordJSObjectAllocation(clone);
2975#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002976 return clone;
2977}
2978
2979
2980Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
2981 JSGlobalProxy* object) {
2982 // Allocate initial map if absent.
2983 if (!constructor->has_initial_map()) {
2984 Object* initial_map = AllocateInitialMap(constructor);
2985 if (initial_map->IsFailure()) return initial_map;
2986 constructor->set_initial_map(Map::cast(initial_map));
2987 Map::cast(initial_map)->set_constructor(constructor);
2988 }
2989
2990 Map* map = constructor->initial_map();
2991
2992 // Check that the already allocated object has the same size as
2993 // objects allocated using the constructor.
2994 ASSERT(map->instance_size() == object->map()->instance_size());
2995
2996 // Allocate the backing storage for the properties.
2997 int prop_size = map->unused_property_fields() - map->inobject_properties();
2998 Object* properties = AllocateFixedArray(prop_size, TENURED);
2999 if (properties->IsFailure()) return properties;
3000
3001 // Reset the map for the object.
3002 object->set_map(constructor->initial_map());
3003
3004 // Reinitialize the object from the constructor map.
3005 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3006 return object;
3007}
3008
3009
3010Object* Heap::AllocateStringFromAscii(Vector<const char> string,
3011 PretenureFlag pretenure) {
3012 Object* result = AllocateRawAsciiString(string.length(), pretenure);
3013 if (result->IsFailure()) return result;
3014
3015 // Copy the characters into the new object.
3016 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3017 for (int i = 0; i < string.length(); i++) {
3018 string_result->SeqAsciiStringSet(i, string[i]);
3019 }
3020 return result;
3021}
3022
3023
3024Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
3025 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003026 // V8 only supports characters in the Basic Multilingual Plane.
3027 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003028 // Count the number of characters in the UTF-8 string and check if
3029 // it is an ASCII string.
3030 Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
3031 decoder->Reset(string.start(), string.length());
3032 int chars = 0;
3033 bool is_ascii = true;
3034 while (decoder->has_more()) {
3035 uc32 r = decoder->GetNext();
3036 if (r > String::kMaxAsciiCharCode) is_ascii = false;
3037 chars++;
3038 }
3039
3040 // If the string is ASCII, we do not need to convert the characters
3041 // since UTF-8 is backwards compatible with ASCII.
3042 if (is_ascii) return AllocateStringFromAscii(string, pretenure);
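  // Example: the three-byte UTF-8 sequence E2 82 AC decodes to the single
  // character U+20AC, so the character count computed above can be smaller
  // than string.length(); only genuinely ASCII input takes the fast path.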
3043
3044 Object* result = AllocateRawTwoByteString(chars, pretenure);
3045 if (result->IsFailure()) return result;
3046
3047 // Convert and copy the characters into the new object.
3048 String* string_result = String::cast(result);
3049 decoder->Reset(string.start(), string.length());
3050 for (int i = 0; i < chars; i++) {
3051 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003052 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003053 string_result->Set(i, r);
3054 }
3055 return result;
3056}
3057
3058
3059Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3060 PretenureFlag pretenure) {
3061 // Check if the string is an ASCII string.
3062 int i = 0;
3063 while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
3064
3065 Object* result;
3066 if (i == string.length()) { // It's an ASCII string.
3067 result = AllocateRawAsciiString(string.length(), pretenure);
3068 } else { // It's not an ASCII string.
3069 result = AllocateRawTwoByteString(string.length(), pretenure);
3070 }
3071 if (result->IsFailure()) return result;
3072
3073 // Copy the characters into the new object, which may be either ASCII or
3074 // UTF-16.
3075 String* string_result = String::cast(result);
3076 for (int i = 0; i < string.length(); i++) {
3077 string_result->Set(i, string[i]);
3078 }
3079 return result;
3080}
3081
3082
3083Map* Heap::SymbolMapForString(String* string) {
3084 // If the string is in new space it cannot be used as a symbol.
3085 if (InNewSpace(string)) return NULL;
3086
3087 // Find the corresponding symbol map for strings.
3088 Map* map = string->map();
Steve Blockd0582a62009-12-15 09:54:21 +00003089 if (map == ascii_string_map()) return ascii_symbol_map();
3090 if (map == string_map()) return symbol_map();
3091 if (map == cons_string_map()) return cons_symbol_map();
3092 if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
3093 if (map == external_string_map()) return external_symbol_map();
3094 if (map == external_ascii_string_map()) return external_ascii_symbol_map();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003095 if (map == external_string_with_ascii_data_map()) {
3096 return external_symbol_with_ascii_data_map();
3097 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003098
3099 // No match found.
3100 return NULL;
3101}
3102
3103
3104Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3105 int chars,
Steve Blockd0582a62009-12-15 09:54:21 +00003106 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003107 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003108 // Ensure the chars matches the number of characters in the buffer.
3109 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3110 // Determine whether the string is ASCII.
3111 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003112 while (buffer->has_more()) {
3113 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3114 is_ascii = false;
3115 break;
3116 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003117 }
3118 buffer->Rewind();
3119
3120 // Compute map and object size.
3121 int size;
3122 Map* map;
3123
3124 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003125 if (chars > SeqAsciiString::kMaxLength) {
3126 return Failure::OutOfMemoryException();
3127 }
Steve Blockd0582a62009-12-15 09:54:21 +00003128 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003129 size = SeqAsciiString::SizeFor(chars);
3130 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003131 if (chars > SeqTwoByteString::kMaxLength) {
3132 return Failure::OutOfMemoryException();
3133 }
Steve Blockd0582a62009-12-15 09:54:21 +00003134 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003135 size = SeqTwoByteString::SizeFor(chars);
3136 }
3137
3138 // Allocate string.
Leon Clarkee46be812010-01-19 14:06:41 +00003139 Object* result = (size > MaxObjectSizeInPagedSpace())
3140 ? lo_space_->AllocateRaw(size)
3141 : old_data_space_->AllocateRaw(size);
Steve Blocka7e24c12009-10-30 11:49:00 +00003142 if (result->IsFailure()) return result;
3143
3144 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003145 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003146 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003147 answer->set_length(chars);
3148 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003149
3150 ASSERT_EQ(size, answer->Size());
3151
3152 // Fill in the characters.
3153 for (int i = 0; i < chars; i++) {
3154 answer->Set(i, buffer->GetNext());
3155 }
3156 return answer;
3157}
3158
3159
3160Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003161 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3162 return Failure::OutOfMemoryException();
3163 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003164
3165 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003166 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003167
Leon Clarkee46be812010-01-19 14:06:41 +00003168 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3169 AllocationSpace retry_space = OLD_DATA_SPACE;
3170
Steve Blocka7e24c12009-10-30 11:49:00 +00003171 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003172 if (size > kMaxObjectSizeInNewSpace) {
3173 // Allocate in large object space; the retry space will be ignored.
3174 space = LO_SPACE;
3175 } else if (size > MaxObjectSizeInPagedSpace()) {
3176 // Allocate in new space, retry in large object space.
3177 retry_space = LO_SPACE;
3178 }
3179 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3180 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003181 }
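  // Space selection sketch: a small young string goes to NEW_SPACE with
  // OLD_DATA_SPACE (or LO_SPACE, if it would not fit on a page) as the
  // retry space; strings too big for new space, and tenured strings too
  // big for a page, go straight to LO_SPACE.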
Leon Clarkee46be812010-01-19 14:06:41 +00003182 Object* result = AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003183 if (result->IsFailure()) return result;
3184
Steve Blocka7e24c12009-10-30 11:49:00 +00003185 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003186 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003187 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003188 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003189 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3190 return result;
3191}
3192
3193
3194Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003195 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3196 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003197 }
Leon Clarkee46be812010-01-19 14:06:41 +00003198 int size = SeqTwoByteString::SizeFor(length);
3199 ASSERT(size <= SeqTwoByteString::kMaxSize);
3200 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3201 AllocationSpace retry_space = OLD_DATA_SPACE;
3202
3203 if (space == NEW_SPACE) {
3204 if (size > kMaxObjectSizeInNewSpace) {
3205 // Allocate in large object space; the retry space will be ignored.
3206 space = LO_SPACE;
3207 } else if (size > MaxObjectSizeInPagedSpace()) {
3208 // Allocate in new space, retry in large object space.
3209 retry_space = LO_SPACE;
3210 }
3211 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3212 space = LO_SPACE;
3213 }
3214 Object* result = AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003215 if (result->IsFailure()) return result;
3216
Steve Blocka7e24c12009-10-30 11:49:00 +00003217 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003218 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003219 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003220 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003221 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3222 return result;
3223}
3224
3225
3226Object* Heap::AllocateEmptyFixedArray() {
3227 int size = FixedArray::SizeFor(0);
3228 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3229 if (result->IsFailure()) return result;
3230 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003231 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3232 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003233 return result;
3234}
3235
3236
3237Object* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003238 if (length < 0 || length > FixedArray::kMaxLength) {
3239 return Failure::OutOfMemoryException();
3240 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003241 // Use the general function if we're forced to always allocate.
3242 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3243 // Allocate the raw data for a fixed array.
3244 int size = FixedArray::SizeFor(length);
3245 return size <= kMaxObjectSizeInNewSpace
3246 ? new_space_.AllocateRaw(size)
3247 : lo_space_->AllocateRawFixedArray(size);
3248}
3249
3250
3251Object* Heap::CopyFixedArray(FixedArray* src) {
3252 int len = src->length();
3253 Object* obj = AllocateRawFixedArray(len);
3254 if (obj->IsFailure()) return obj;
3255 if (Heap::InNewSpace(obj)) {
3256 HeapObject* dst = HeapObject::cast(obj);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003257 CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
Steve Blocka7e24c12009-10-30 11:49:00 +00003258 return obj;
3259 }
3260 HeapObject::cast(obj)->set_map(src->map());
3261 FixedArray* result = FixedArray::cast(obj);
3262 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003263
Steve Blocka7e24c12009-10-30 11:49:00 +00003264 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003265 AssertNoAllocation no_gc;
3266 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003267 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3268 return result;
3269}
3270
3271
3272Object* Heap::AllocateFixedArray(int length) {
3273 ASSERT(length >= 0);
3274 if (length == 0) return empty_fixed_array();
3275 Object* result = AllocateRawFixedArray(length);
3276 if (!result->IsFailure()) {
3277 // Initialize header.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003278 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3279 array->set_map(fixed_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003280 array->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003281 // Initialize body.
Steve Block6ded16b2010-05-10 14:33:55 +01003282 ASSERT(!Heap::InNewSpace(undefined_value()));
3283 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003284 }
3285 return result;
3286}
3287
3288
Steve Block6ded16b2010-05-10 14:33:55 +01003289Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003290 if (length < 0 || length > FixedArray::kMaxLength) {
3291 return Failure::OutOfMemoryException();
3292 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003293
Leon Clarkee46be812010-01-19 14:06:41 +00003294 AllocationSpace space =
3295 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003296 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003297 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3298 // Too big for new space.
3299 space = LO_SPACE;
3300 } else if (space == OLD_POINTER_SPACE &&
3301 size > MaxObjectSizeInPagedSpace()) {
3302 // Too big for old pointer space.
3303 space = LO_SPACE;
3304 }
3305
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003306 AllocationSpace retry_space =
3307 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3308
3309 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003310}
3311
3312
Steve Block6ded16b2010-05-10 14:33:55 +01003313static Object* AllocateFixedArrayWithFiller(int length,
3314 PretenureFlag pretenure,
3315 Object* filler) {
3316 ASSERT(length >= 0);
3317 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
3318 if (length == 0) return Heap::empty_fixed_array();
3319
3320 ASSERT(!Heap::InNewSpace(filler));
3321 Object* result = Heap::AllocateRawFixedArray(length, pretenure);
3322 if (result->IsFailure()) return result;
3323
3324 HeapObject::cast(result)->set_map(Heap::fixed_array_map());
3325 FixedArray* array = FixedArray::cast(result);
3326 array->set_length(length);
3327 MemsetPointer(array->data_start(), filler, length);
3328 return array;
3329}
3330
3331
3332Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
3333 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3334}
3335
3336
3337Object* Heap::AllocateFixedArrayWithHoles(int length, PretenureFlag pretenure) {
3338 return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
3339}
3340
3341
3342Object* Heap::AllocateUninitializedFixedArray(int length) {
3343 if (length == 0) return empty_fixed_array();
3344
3345 Object* obj = AllocateRawFixedArray(length);
3346 if (obj->IsFailure()) return obj;
3347
3348 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3349 FixedArray::cast(obj)->set_length(length);
3350 return obj;
3351}
3352
3353
3354Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3355 Object* result = Heap::AllocateFixedArray(length, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003356 if (result->IsFailure()) return result;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003357 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003358 ASSERT(result->IsHashTable());
3359 return result;
3360}
3361
3362
3363Object* Heap::AllocateGlobalContext() {
3364 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3365 if (result->IsFailure()) return result;
3366 Context* context = reinterpret_cast<Context*>(result);
3367 context->set_map(global_context_map());
3368 ASSERT(context->IsGlobalContext());
3369 ASSERT(result->IsContext());
3370 return result;
3371}
3372
3373
3374Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
3375 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
3376 Object* result = Heap::AllocateFixedArray(length);
3377 if (result->IsFailure()) return result;
3378 Context* context = reinterpret_cast<Context*>(result);
3379 context->set_map(context_map());
3380 context->set_closure(function);
3381 context->set_fcontext(context);
3382 context->set_previous(NULL);
3383 context->set_extension(NULL);
3384 context->set_global(function->context()->global());
3385 ASSERT(!context->IsGlobalContext());
3386 ASSERT(context->is_function_context());
3387 ASSERT(result->IsContext());
3388 return result;
3389}
3390
3391
3392Object* Heap::AllocateWithContext(Context* previous,
3393 JSObject* extension,
3394 bool is_catch_context) {
3395 Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3396 if (result->IsFailure()) return result;
3397 Context* context = reinterpret_cast<Context*>(result);
3398 context->set_map(is_catch_context ? catch_context_map() : context_map());
3399 context->set_closure(previous->closure());
3400 context->set_fcontext(previous->fcontext());
3401 context->set_previous(previous);
3402 context->set_extension(extension);
3403 context->set_global(previous->global());
3404 ASSERT(!context->IsGlobalContext());
3405 ASSERT(!context->is_function_context());
3406 ASSERT(result->IsContext());
3407 return result;
3408}
3409
3410
3411Object* Heap::AllocateStruct(InstanceType type) {
3412 Map* map;
3413 switch (type) {
3414#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3415STRUCT_LIST(MAKE_CASE)
3416#undef MAKE_CASE
3417 default:
3418 UNREACHABLE();
3419 return Failure::InternalError();
3420 }
3421 int size = map->instance_size();
3422 AllocationSpace space =
3423 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
3424 Object* result = Heap::Allocate(map, space);
3425 if (result->IsFailure()) return result;
3426 Struct::cast(result)->InitializeBody(size);
3427 return result;
3428}
3429
3430
3431bool Heap::IdleNotification() {
3432 static const int kIdlesBeforeScavenge = 4;
3433 static const int kIdlesBeforeMarkSweep = 7;
3434 static const int kIdlesBeforeMarkCompact = 8;
3435 static int number_idle_notifications = 0;
3436 static int last_gc_count = gc_count_;
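  // Escalation sketch: counting consecutive idle notifications without an
  // intervening GC, the 4th triggers a scavenge (or a full GC if contexts
  // were disposed), the 7th clears the compilation cache and does a full
  // collection, and the 8th does a compacting collection and resets the
  // counter.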
3437
Steve Block6ded16b2010-05-10 14:33:55 +01003438 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003439 bool finished = false;
3440
3441 if (last_gc_count == gc_count_) {
3442 number_idle_notifications++;
3443 } else {
3444 number_idle_notifications = 0;
3445 last_gc_count = gc_count_;
3446 }
3447
3448 if (number_idle_notifications == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003449 if (contexts_disposed_ > 0) {
3450 HistogramTimerScope scope(&Counters::gc_context);
3451 CollectAllGarbage(false);
3452 } else {
3453 CollectGarbage(0, NEW_SPACE);
3454 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003455 new_space_.Shrink();
3456 last_gc_count = gc_count_;
3457
3458 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003459 // Before doing the mark-sweep collections we clear the
3460 // compilation cache to avoid hanging on to source code and
3461 // generated code for cached functions.
3462 CompilationCache::Clear();
3463
Steve Blocka7e24c12009-10-30 11:49:00 +00003464 CollectAllGarbage(false);
3465 new_space_.Shrink();
3466 last_gc_count = gc_count_;
3467
3468 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3469 CollectAllGarbage(true);
3470 new_space_.Shrink();
3471 last_gc_count = gc_count_;
3472 number_idle_notifications = 0;
3473 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003474
3475 } else if (contexts_disposed_ > 0) {
3476 if (FLAG_expose_gc) {
3477 contexts_disposed_ = 0;
3478 } else {
3479 HistogramTimerScope scope(&Counters::gc_context);
3480 CollectAllGarbage(false);
3481 last_gc_count = gc_count_;
3482 }
3483 // If this is the first idle notification, we reset the
3484 // notification count to avoid letting idle notifications for
3485 // context disposal garbage collections start an overly
3486 // aggressive idle GC cycle.
3487 if (number_idle_notifications <= 1) {
3488 number_idle_notifications = 0;
3489 uncommit = false;
3490 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003491 }
3492
Steve Block6ded16b2010-05-10 14:33:55 +01003493 // Make sure that we have no pending context disposals and
3494 // conditionally uncommit from space.
3495 ASSERT(contexts_disposed_ == 0);
3496 if (uncommit) Heap::UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003497 return finished;
3498}
3499
3500
3501#ifdef DEBUG
3502
3503void Heap::Print() {
3504 if (!HasBeenSetup()) return;
3505 Top::PrintStack();
3506 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003507 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3508 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003509}
3510
3511
3512void Heap::ReportCodeStatistics(const char* title) {
3513 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3514 PagedSpace::ResetCodeStatistics();
3515 // We do not look for code in new space, map space, or old space. If code
3516 // somehow ends up in those spaces, we would miss it here.
3517 code_space_->CollectCodeStatistics();
3518 lo_space_->CollectCodeStatistics();
3519 PagedSpace::ReportCodeStatistics();
3520}
3521
3522
3523// This function expects that NewSpace's allocated objects histogram is
3524// populated (via a call to CollectStatistics or else as a side effect of a
3525// just-completed scavenge collection).
3526void Heap::ReportHeapStatistics(const char* title) {
3527 USE(title);
3528 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3529 title, gc_count_);
3530 PrintF("mark-compact GC : %d\n", mc_count_);
3531 PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
3532 PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
3533
3534 PrintF("\n");
3535 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3536 GlobalHandles::PrintStats();
3537 PrintF("\n");
3538
3539 PrintF("Heap statistics : ");
3540 MemoryAllocator::ReportStatistics();
3541 PrintF("To space : ");
3542 new_space_.ReportStatistics();
3543 PrintF("Old pointer space : ");
3544 old_pointer_space_->ReportStatistics();
3545 PrintF("Old data space : ");
3546 old_data_space_->ReportStatistics();
3547 PrintF("Code space : ");
3548 code_space_->ReportStatistics();
3549 PrintF("Map space : ");
3550 map_space_->ReportStatistics();
3551 PrintF("Cell space : ");
3552 cell_space_->ReportStatistics();
3553 PrintF("Large object space : ");
3554 lo_space_->ReportStatistics();
3555 PrintF(">>>>>> ========================================= >>>>>>\n");
3556}
3557
3558#endif // DEBUG
3559
3560bool Heap::Contains(HeapObject* value) {
3561 return Contains(value->address());
3562}
3563
3564
3565bool Heap::Contains(Address addr) {
3566 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3567 return HasBeenSetup() &&
3568 (new_space_.ToSpaceContains(addr) ||
3569 old_pointer_space_->Contains(addr) ||
3570 old_data_space_->Contains(addr) ||
3571 code_space_->Contains(addr) ||
3572 map_space_->Contains(addr) ||
3573 cell_space_->Contains(addr) ||
3574 lo_space_->SlowContains(addr));
3575}
3576
3577
3578bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3579 return InSpace(value->address(), space);
3580}
3581
3582
3583bool Heap::InSpace(Address addr, AllocationSpace space) {
3584 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3585 if (!HasBeenSetup()) return false;
3586
3587 switch (space) {
3588 case NEW_SPACE:
3589 return new_space_.ToSpaceContains(addr);
3590 case OLD_POINTER_SPACE:
3591 return old_pointer_space_->Contains(addr);
3592 case OLD_DATA_SPACE:
3593 return old_data_space_->Contains(addr);
3594 case CODE_SPACE:
3595 return code_space_->Contains(addr);
3596 case MAP_SPACE:
3597 return map_space_->Contains(addr);
3598 case CELL_SPACE:
3599 return cell_space_->Contains(addr);
3600 case LO_SPACE:
3601 return lo_space_->SlowContains(addr);
3602 }
3603
3604 return false;
3605}
3606
3607
3608#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003609static void DummyScavengePointer(HeapObject** p) {
3610}
3611
3612
3613static void VerifyPointersUnderWatermark(
3614 PagedSpace* space,
3615 DirtyRegionCallback visit_dirty_region) {
3616 PageIterator it(space, PageIterator::PAGES_IN_USE);
3617
3618 while (it.has_next()) {
3619 Page* page = it.next();
3620 Address start = page->ObjectAreaStart();
3621 Address end = page->AllocationWatermark();
3622
3623 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
3624 start,
3625 end,
3626 visit_dirty_region,
3627 &DummyScavengePointer);
3628 }
3629}
3630
3631
3632static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3633 LargeObjectIterator it(space);
3634 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3635 if (object->IsFixedArray()) {
3636 Address slot_address = object->address();
3637 Address end = object->address() + object->Size();
3638
3639 while (slot_address < end) {
3640 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
3641 // When we are not in a GC, the Heap::InNewSpace() predicate
3642 // checks that pointers which satisfy the predicate point into
3643 // the active semispace.
3644 Heap::InNewSpace(*slot);
3645 slot_address += kPointerSize;
3646 }
3647 }
3648 }
3649}
3650
3651
Steve Blocka7e24c12009-10-30 11:49:00 +00003652void Heap::Verify() {
3653 ASSERT(HasBeenSetup());
3654
3655 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00003656 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00003657
3658 new_space_.Verify();
3659
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003660 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
3661 old_pointer_space_->Verify(&dirty_regions_visitor);
3662 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003663
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003664 VerifyPointersUnderWatermark(old_pointer_space_,
3665 &IteratePointersInDirtyRegion);
3666 VerifyPointersUnderWatermark(map_space_,
3667 &IteratePointersInDirtyMapsRegion);
3668 VerifyPointersUnderWatermark(lo_space_);
3669
3670 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
3671 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
3672
3673 VerifyPointersVisitor no_dirty_regions_visitor;
3674 old_data_space_->Verify(&no_dirty_regions_visitor);
3675 code_space_->Verify(&no_dirty_regions_visitor);
3676 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003677
3678 lo_space_->Verify();
3679}
3680#endif // DEBUG
3681
3682
3683Object* Heap::LookupSymbol(Vector<const char> string) {
3684 Object* symbol = NULL;
3685 Object* new_table = symbol_table()->LookupSymbol(string, &symbol);
3686 if (new_table->IsFailure()) return new_table;
3687 // Can't use set_symbol_table because SymbolTable::cast knows that
3688 // SymbolTable is a singleton and checks for identity.
3689 roots_[kSymbolTableRootIndex] = new_table;
3690 ASSERT(symbol != NULL);
3691 return symbol;
3692}
3693
3694
3695Object* Heap::LookupSymbol(String* string) {
3696 if (string->IsSymbol()) return string;
3697 Object* symbol = NULL;
3698 Object* new_table = symbol_table()->LookupString(string, &symbol);
3699 if (new_table->IsFailure()) return new_table;
3700 // Can't use set_symbol_table because SymbolTable::cast knows that
3701 // SymbolTable is a singleton and checks for identity.
3702 roots_[kSymbolTableRootIndex] = new_table;
3703 ASSERT(symbol != NULL);
3704 return symbol;
3705}
3706
3707
3708bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
3709 if (string->IsSymbol()) {
3710 *symbol = string;
3711 return true;
3712 }
3713 return symbol_table()->LookupSymbolIfExists(string, symbol);
3714}
3715
3716
3717#ifdef DEBUG
3718void Heap::ZapFromSpace() {
3719 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
3720 for (Address a = new_space_.FromSpaceLow();
3721 a < new_space_.FromSpaceHigh();
3722 a += kPointerSize) {
3723 Memory::Address_at(a) = kFromSpaceZapValue;
3724 }
3725}
3726#endif // DEBUG
3727
3728
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003729bool Heap::IteratePointersInDirtyRegion(Address start,
3730 Address end,
3731 ObjectSlotCallback copy_object_func) {
3732 Address slot_address = start;
3733 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00003734
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003735 while (slot_address < end) {
3736 Object** slot = reinterpret_cast<Object**>(slot_address);
3737 if (Heap::InNewSpace(*slot)) {
3738 ASSERT((*slot)->IsHeapObject());
3739 copy_object_func(reinterpret_cast<HeapObject**>(slot));
3740 if (Heap::InNewSpace(*slot)) {
3741 ASSERT((*slot)->IsHeapObject());
3742 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003743 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003744 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003745 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00003746 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003747 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00003748}
3749
3750
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003751// Compute the start address of the first map following the given addr.
3752static inline Address MapStartAlign(Address addr) {
3753 Address page = Page::FromAddress(addr)->ObjectAreaStart();
3754 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
3755}
Steve Blocka7e24c12009-10-30 11:49:00 +00003756
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003757
3758// Compute the end address of the first map preceding the given addr.
3759static inline Address MapEndAlign(Address addr) {
3760 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
3761 return page + ((addr - page) / Map::kSize * Map::kSize);
3762}
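// Alignment sketch (hypothetical values): taking Map::kSize as 0x58 and a
// page whose object area starts at 0x1000, MapStartAlign(0x1010) yields
// 0x1058 (the first map boundary at or after the address), while
// MapEndAlign(0x1100) yields 0x10b0 (the last map boundary at or before it).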
3763
3764
3765static bool IteratePointersInDirtyMaps(Address start,
3766 Address end,
3767 ObjectSlotCallback copy_object_func) {
3768 ASSERT(MapStartAlign(start) == start);
3769 ASSERT(MapEndAlign(end) == end);
3770
3771 Address map_address = start;
3772 bool pointers_to_new_space_found = false;
3773
3774 while (map_address < end) {
3775 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
3776 ASSERT(Memory::Object_at(map_address)->IsMap());
3777
3778 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
3779 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
3780
3781 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
3782 pointer_fields_end,
3783 copy_object_func)) {
3784 pointers_to_new_space_found = true;
3785 }
3786
3787 map_address += Map::kSize;
3788 }
3789
3790 return pointers_to_new_space_found;
3791}
3792
3793
3794bool Heap::IteratePointersInDirtyMapsRegion(
3795 Address start,
3796 Address end,
3797 ObjectSlotCallback copy_object_func) {
3798 Address map_aligned_start = MapStartAlign(start);
3799 Address map_aligned_end = MapEndAlign(end);
3800
3801 bool contains_pointers_to_new_space = false;
3802
3803 if (map_aligned_start != start) {
3804 Address prev_map = map_aligned_start - Map::kSize;
3805 ASSERT(Memory::Object_at(prev_map)->IsMap());
3806
3807 Address pointer_fields_start =
3808 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
3809
3810 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003811 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003812
3813 contains_pointers_to_new_space =
3814 IteratePointersInDirtyRegion(pointer_fields_start,
3815 pointer_fields_end,
3816 copy_object_func)
3817 || contains_pointers_to_new_space;
3818 }
3819
3820 contains_pointers_to_new_space =
3821 IteratePointersInDirtyMaps(map_aligned_start,
3822 map_aligned_end,
3823 copy_object_func)
3824 || contains_pointers_to_new_space;
3825
3826 if (map_aligned_end != end) {
3827 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
3828
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003829 Address pointer_fields_start =
3830 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003831
3832 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003833 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003834
3835 contains_pointers_to_new_space =
3836 IteratePointersInDirtyRegion(pointer_fields_start,
3837 pointer_fields_end,
3838 copy_object_func)
3839 || contains_pointers_to_new_space;
3840 }
3841
3842 return contains_pointers_to_new_space;
3843}
3844
3845
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003846void Heap::IterateAndMarkPointersToFromSpace(Address start,
3847 Address end,
3848 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003849 Address slot_address = start;
3850 Page* page = Page::FromAddress(start);
3851
3852 uint32_t marks = page->GetRegionMarks();
3853
3854 while (slot_address < end) {
3855 Object** slot = reinterpret_cast<Object**>(slot_address);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003856 if (Heap::InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003857 ASSERT((*slot)->IsHeapObject());
3858 callback(reinterpret_cast<HeapObject**>(slot));
3859 if (Heap::InNewSpace(*slot)) {
3860 ASSERT((*slot)->IsHeapObject());
3861 marks |= page->GetRegionMaskForAddress(slot_address);
3862 }
3863 }
3864 slot_address += kPointerSize;
3865 }
3866
3867 page->SetRegionMarks(marks);
3868}
3869
3870
3871uint32_t Heap::IterateDirtyRegions(
3872 uint32_t marks,
3873 Address area_start,
3874 Address area_end,
3875 DirtyRegionCallback visit_dirty_region,
3876 ObjectSlotCallback copy_object_func) {
3877 uint32_t newmarks = 0;
3878 uint32_t mask = 1;
3879
3880 if (area_start >= area_end) {
3881 return newmarks;
3882 }
3883
3884 Address region_start = area_start;
3885
3886 // area_start does not necessarily coincide with the start of the first
3887 // region. Thus, to calculate the beginning of the next region, we have to
3888 // align area_start to Page::kRegionSize.
3889 Address second_region =
3890 reinterpret_cast<Address>(
3891 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
3892 ~Page::kRegionAlignmentMask);
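  // Example (hypothetical region size): with Page::kRegionSize == 0x100 and
  // area_start == 0x1234, second_region becomes 0x1300, so the first dirty
  // bit is checked against [0x1234, 0x1300) and later regions are walked in
  // full 0x100-byte steps.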
3893
3894 // Next region might be beyond area_end.
3895 Address region_end = Min(second_region, area_end);
3896
3897 if (marks & mask) {
3898 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3899 newmarks |= mask;
3900 }
3901 }
3902 mask <<= 1;
3903
3904 // Iterate subsequent regions which lie fully inside [area_start, area_end).
3905 region_start = region_end;
3906 region_end = region_start + Page::kRegionSize;
3907
3908 while (region_end <= area_end) {
3909 if (marks & mask) {
3910 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3911 newmarks |= mask;
3912 }
3913 }
3914
3915 region_start = region_end;
3916 region_end = region_start + Page::kRegionSize;
3917
3918 mask <<= 1;
3919 }
3920
3921 if (region_start != area_end) {
3922 // A small piece of the area is left unvisited because area_end does not
3923 // coincide with a region end. Check whether the region covering the last
3924 // part of the area is dirty.
3925 if (marks & mask) {
3926 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
3927 newmarks |= mask;
3928 }
3929 }
3930 }
3931
3932 return newmarks;
3933}
3934
3935
3936
3937void Heap::IterateDirtyRegions(
3938 PagedSpace* space,
3939 DirtyRegionCallback visit_dirty_region,
3940 ObjectSlotCallback copy_object_func,
3941 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003942
3943 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003944
Steve Blocka7e24c12009-10-30 11:49:00 +00003945 while (it.has_next()) {
3946 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003947 uint32_t marks = page->GetRegionMarks();
3948
3949 if (marks != Page::kAllRegionsCleanMarks) {
3950 Address start = page->ObjectAreaStart();
3951
3952 // Do not try to visit pointers beyond the page's allocation watermark.
3953 // The page can contain garbage pointers there.
3954 Address end;
3955
3956 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
3957 page->IsWatermarkValid()) {
3958 end = page->AllocationWatermark();
3959 } else {
3960 end = page->CachedAllocationWatermark();
3961 }
3962
3963 ASSERT(space == old_pointer_space_ ||
3964 (space == map_space_ &&
3965 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
3966
3967 page->SetRegionMarks(IterateDirtyRegions(marks,
3968 start,
3969 end,
3970 visit_dirty_region,
3971 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00003972 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003973
3974 // Mark page watermark as invalid to maintain watermark validity invariant.
3975 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
3976 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00003977 }
3978}
3979
3980
Steve Blockd0582a62009-12-15 09:54:21 +00003981void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
3982 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00003983 IterateWeakRoots(v, mode);
3984}
3985
3986
3987void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003988 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00003989 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00003990 if (mode != VISIT_ALL_IN_SCAVENGE) {
3991 // Scavenge collections have special processing for this.
3992 ExternalStringTable::Iterate(v);
3993 }
3994 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00003995}
3996
3997
Steve Blockd0582a62009-12-15 09:54:21 +00003998void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003999 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004000 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004001
Steve Block6ded16b2010-05-10 14:33:55 +01004002 v->VisitPointer(BitCast<Object**, String**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004003 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004004
4005 Bootstrapper::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004006 v->Synchronize("bootstrapper");
Steve Blocka7e24c12009-10-30 11:49:00 +00004007 Top::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004008 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004009 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004010 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004011
4012#ifdef ENABLE_DEBUGGER_SUPPORT
4013 Debug::Iterate(v);
4014#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004015 v->Synchronize("debug");
Steve Blocka7e24c12009-10-30 11:49:00 +00004016 CompilationCache::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004017 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004018
4019 // Iterate over local handles in handle scopes.
4020 HandleScopeImplementer::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004021 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004022
Leon Clarkee46be812010-01-19 14:06:41 +00004023 // Iterate over the builtin code objects and code stubs in the
4024 // heap. Note that it is not necessary to iterate over code objects
4025 // on scavenge collections.
4026 if (mode != VISIT_ALL_IN_SCAVENGE) {
4027 Builtins::IterateBuiltins(v);
4028 }
Steve Blockd0582a62009-12-15 09:54:21 +00004029 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004030
4031 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004032 if (mode == VISIT_ONLY_STRONG) {
4033 GlobalHandles::IterateStrongRoots(v);
4034 } else {
4035 GlobalHandles::IterateAllRoots(v);
4036 }
4037 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004038
4039 // Iterate over pointers being held by inactive threads.
4040 ThreadManager::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004041 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004042
4043 // Iterate over the pointers the Serialization/Deserialization code is
4044 // holding.
4045 // During garbage collection this keeps the partial snapshot cache alive.
4046 // During deserialization of the startup snapshot this creates the partial
4047 // snapshot cache and deserializes the objects it refers to. During
4048 // serialization this does nothing, since the partial snapshot cache is
4049 // empty. However the next thing we do is create the partial snapshot,
4050 // filling up the partial snapshot cache with objects it needs as we go.
4051 SerializerDeserializer::Iterate(v);
4052 // We don't do a v->Synchronize call here, because in debug mode that will
4053 // output a flag to the snapshot. However at this point the serializer and
4054 // deserializer are deliberately a little unsynchronized (see above) so the
4055 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004056}
Steve Blocka7e24c12009-10-30 11:49:00 +00004057
4058
4059 // This flag is set when the heap has been configured. The heap can be
4060 // repeatedly configured through the API until it is set up.
4061static bool heap_configured = false;
4062
4063// TODO(1236194): Since the heap size is configurable on the command line
4064// and through the API, we should gracefully handle the case that the heap
4065// size is not big enough to fit all the initial objects.
Steve Block3ce2e202009-11-05 08:53:23 +00004066bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004067 if (HasBeenSetup()) return false;
4068
Steve Block3ce2e202009-11-05 08:53:23 +00004069 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4070
4071 if (Snapshot::IsEnabled()) {
4072 // If we are using a snapshot we always reserve the default amount
4073 // of memory for each semispace because code in the snapshot has
4074 // write-barrier code that relies on the size and alignment of new
4075 // space. We therefore cannot use a larger max semispace size
4076 // than the default reserved semispace size.
4077 if (max_semispace_size_ > reserved_semispace_size_) {
4078 max_semispace_size_ = reserved_semispace_size_;
4079 }
4080 } else {
4081 // If we are not using snapshots we reserve space for the actual
4082 // max semispace size.
4083 reserved_semispace_size_ = max_semispace_size_;
4084 }
4085
4086 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00004087
4088 // The new space size must be a power of two to support single-bit testing
4089 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004090 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4091 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4092 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4093 external_allocation_limit_ = 10 * max_semispace_size_;
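  // Example: a requested max semispace of 3 MB is rounded up to 4 MB so
  // that new-space containment can be tested with a single bit mask, and
  // the external allocation limit then becomes 10 * 4 MB = 40 MB.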
Steve Blocka7e24c12009-10-30 11:49:00 +00004094
4095 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004096 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004097
4098 heap_configured = true;
4099 return true;
4100}
4101
4102
4103bool Heap::ConfigureHeapDefault() {
Steve Block3ce2e202009-11-05 08:53:23 +00004104 return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00004105}
4106
4107
Steve Blockd0582a62009-12-15 09:54:21 +00004108void Heap::RecordStats(HeapStats* stats) {
4109 *stats->start_marker = 0xDECADE00;
4110 *stats->end_marker = 0xDECADE01;
4111 *stats->new_space_size = new_space_.Size();
4112 *stats->new_space_capacity = new_space_.Capacity();
4113 *stats->old_pointer_space_size = old_pointer_space_->Size();
4114 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4115 *stats->old_data_space_size = old_data_space_->Size();
4116 *stats->old_data_space_capacity = old_data_space_->Capacity();
4117 *stats->code_space_size = code_space_->Size();
4118 *stats->code_space_capacity = code_space_->Capacity();
4119 *stats->map_space_size = map_space_->Size();
4120 *stats->map_space_capacity = map_space_->Capacity();
4121 *stats->cell_space_size = cell_space_->Size();
4122 *stats->cell_space_capacity = cell_space_->Capacity();
4123 *stats->lo_space_size = lo_space_->Size();
4124 GlobalHandles::RecordStats(stats);
4125}
4126
4127
Steve Blocka7e24c12009-10-30 11:49:00 +00004128int Heap::PromotedSpaceSize() {
4129 return old_pointer_space_->Size()
4130 + old_data_space_->Size()
4131 + code_space_->Size()
4132 + map_space_->Size()
4133 + cell_space_->Size()
4134 + lo_space_->Size();
4135}
4136
4137
4138int Heap::PromotedExternalMemorySize() {
4139 if (amount_of_external_allocated_memory_
4140 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4141 return amount_of_external_allocated_memory_
4142 - amount_of_external_allocated_memory_at_last_global_gc_;
4143}
4144
4145
4146bool Heap::Setup(bool create_heap_objects) {
4147 // Initialize heap spaces and initial maps and objects. Whenever something
4148 // goes wrong, just return false. The caller should check the results and
4149 // call Heap::TearDown() to release allocated memory.
4150 //
4151 // If the heap is not yet configured (e.g., through the API), configure it.
4152 // Configuration is based on the flags new-space-size (really the semispace
4153 // size) and old-space-size if they are set, or on the initial values of
4154 // semispace_size_ and old_generation_size_ otherwise.
4155 if (!heap_configured) {
4156 if (!ConfigureHeapDefault()) return false;
4157 }
4158
4159 // Set up the memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004160 // space. The chunk is double the size of the requested reserved
4161 // new space size to ensure that we can find a pair of semispaces that
4162 // are contiguous and aligned to their size.
4163 if (!MemoryAllocator::Setup(MaxReserved())) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004164 void* chunk =
Steve Block3ce2e202009-11-05 08:53:23 +00004165 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004166 if (chunk == NULL) return false;
4167
4168 // Align the pair of semispaces to their size, which must be a power
4169 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004170 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004171 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4172 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4173 return false;
4174 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004175
4176 // Initialize old pointer space.
4177 old_pointer_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004178 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004179 if (old_pointer_space_ == NULL) return false;
4180 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4181
4182 // Initialize old data space.
4183 old_data_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004184 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004185 if (old_data_space_ == NULL) return false;
4186 if (!old_data_space_->Setup(NULL, 0)) return false;
4187
4188 // Initialize the code space, set its maximum capacity to the old
4189 // generation size. It needs executable memory.
4190 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4191 // virtual address space, so that they can call each other with near calls.
4192 if (code_range_size_ > 0) {
4193 if (!CodeRange::Setup(code_range_size_)) {
4194 return false;
4195 }
4196 }
4197
4198 code_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004199 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004200 if (code_space_ == NULL) return false;
4201 if (!code_space_->Setup(NULL, 0)) return false;
4202
4203 // Initialize map space.
Leon Clarkee46be812010-01-19 14:06:41 +00004204 map_space_ = new MapSpace(FLAG_use_big_map_space
4205 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004206 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4207 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004208 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004209 if (map_space_ == NULL) return false;
4210 if (!map_space_->Setup(NULL, 0)) return false;
4211
4212 // Initialize global property cell space.
Steve Block3ce2e202009-11-05 08:53:23 +00004213 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004214 if (cell_space_ == NULL) return false;
4215 if (!cell_space_->Setup(NULL, 0)) return false;
4216
  // The large object space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->Setup()) return false;

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects.
    if (!CreateInitialObjects()) return false;
  }

  LOG(IntEvent("heap-capacity", Capacity()));
  LOG(IntEvent("heap-available", Available()));

#ifdef ENABLE_LOGGING_AND_PROFILING
  // This should be called only after initial objects have been created.
  ProducerHeapProfile::Setup();
#endif

  return true;
}
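
// Illustrative usage sketch (not part of the original file): the lifecycle
// this function expects from its caller. If the heap has not been configured
// through the API beforehand, Setup() falls back to ConfigureHeapDefault().
//
//   if (!Heap::Setup(true)) {   // true: also create initial maps and objects
//     Heap::TearDown();         // release any partially allocated spaces
//     return false;
//   }
//   // ... run the VM ...
//   Heap::TearDown();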


void Heap::SetStackLimits() {
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore them.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
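
// Worked example (illustrative, with a made-up address): how the expression
// above disguises an address as a small integer. With kSmiTag == 0 and
// kSmiTagMask == 1, clearing the low bit and or-ing in the tag simply forces
// the value to be even, which the GC treats as a Smi and never dereferences.
//
//   uintptr_t jslimit = 0x7fff5fbff7a1;        // hypothetical stack limit
//   uintptr_t tagged  = (jslimit & ~1) | 0;    // 0x7fff5fbff7a0, Smi-tagged
//   // Root visitors see a Smi and skip the entry during GC.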


void Heap::TearDown() {
  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("mark_compact_count=%d ", mc_count_);
    PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
    PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
    PrintF("max_alive_after_gc=%d ", GCTracer::get_max_alive_after_gc());
    PrintF("\n\n");
  }

  GlobalHandles::TearDown();

  ExternalStringTable::TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  MemoryAllocator::TearDown();
}


void Heap::Shrink() {
  // Try to shrink all paged spaces.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->Shrink();
}


#ifdef ENABLE_HEAP_PROTECTION

void Heap::Protect() {
  if (HasBeenSetup()) {
    AllSpaces spaces;
    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
      space->Protect();
  }
}


void Heap::Unprotect() {
  if (HasBeenSetup()) {
    AllSpaces spaces;
    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
      space->Unprotect();
  }
}

#endif


void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}
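
// Illustrative sketch (not part of the original file): how a pair of these
// registration functions is typically used. The callback name is hypothetical,
// and the GCPrologueCallback signature and GCType/GCCallbackFlags constants are
// assumed to match the public API of this V8 version.
//
//   static void ExamplePrologue(GCType type, GCCallbackFlags flags) {
//     if (type == kGCTypeMarkSweepCompact) {
//       // e.g. flush embedder-side caches before a full collection
//     }
//   }
//
//   Heap::AddGCPrologueCallback(ExamplePrologue, kGCTypeAll);
//   ...
//   Heap::RemoveGCPrologueCallback(ExamplePrologue);  // hits UNREACHABLE() if
//                                                     // it was never added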


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF(" handle %p to %p\n", p, *p);
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  HandleScopeImplementer::Iterate(&v);
}

#endif


Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return Heap::new_space();
    case OLD_POINTER_SPACE:
      return Heap::old_pointer_space();
    case OLD_DATA_SPACE:
      return Heap::old_data_space();
    case CODE_SPACE:
      return Heap::code_space();
    case MAP_SPACE:
      return Heap::map_space();
    case CELL_SPACE:
      return Heap::cell_space();
    case LO_SPACE:
      return Heap::lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return Heap::old_pointer_space();
    case OLD_DATA_SPACE:
      return Heap::old_data_space();
    case CODE_SPACE:
      return Heap::code_space();
    case MAP_SPACE:
      return Heap::map_space();
    case CELL_SPACE:
      return Heap::cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return Heap::old_pointer_space();
    case OLD_DATA_SPACE:
      return Heap::old_data_space();
    case CODE_SPACE:
      return Heap::code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(Heap::new_space());
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_data_space());
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(Heap::code_space());
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(Heap::map_space());
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(Heap::cell_space());
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(Heap::lo_space());
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


HeapIterator::HeapIterator() {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator();
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
}


HeapObject* HeapIterator::next() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}
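
// Illustrative sketch (not part of the original file): walking every live
// HeapObject in all spaces with the iterator defined above. The loop body is a
// hypothetical example.
//
//   HeapIterator iterator;
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     if (obj->IsJSFunction()) {
//       // inspect or count the object
//     }
//   }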


#ifdef DEBUG

static bool search_for_any_global;
static Object* search_target;
static bool found_target;
static List<Object*> object_stack(20);


// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;
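
// Illustrative note (not part of the original file): the mark is stored by
// tagging the object's map pointer, so no side table is needed. With a
// hypothetical object the arithmetic used below looks like this:
//
//   Address map_addr = map_p->address();  // e.g. 0x2ae5c0, tag-aligned
//   obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));  // now 0x2ae5c2
//   // The stored map is no longer a properly tagged HeapObject pointer, which
//   // is how the "visited before" check below recognizes a marked object;
//   // UnmarkObjectRecursively() subtracts kMarkTag to restore it.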

static void MarkObjectRecursively(Object** p);
class MarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Mark all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkObjectRecursively(p);
    }
  }
};

static MarkObjectVisitor mark_visitor;

static void MarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target) return;  // stop if target found
  object_stack.Add(obj);
  if ((search_for_any_global && obj->IsJSGlobalObject()) ||
      (!search_for_any_global && (obj == search_target))) {
    found_target = true;
    return;
  }

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  MarkObjectRecursively(&map);

  obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                   &mark_visitor);

  if (!found_target)  // don't pop if found the target
    object_stack.RemoveLast();
}


static void UnmarkObjectRecursively(Object** p);
class UnmarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Unmark all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        UnmarkObjectRecursively(p);
    }
  }
};

static UnmarkObjectVisitor unmark_visitor;

static void UnmarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   &unmark_visitor);
}


static void MarkRootObjectRecursively(Object** root) {
  if (search_for_any_global) {
    ASSERT(search_target == NULL);
  } else {
    ASSERT(search_target->IsHeapObject());
  }
  found_target = false;
  object_stack.Clear();

  MarkObjectRecursively(root);
  UnmarkObjectRecursively(root);

  if (found_target) {
    PrintF("=====================================\n");
    PrintF("==== Path to object ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack.is_empty());
    for (int i = 0; i < object_stack.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Helper class for visiting HeapObjects recursively.
class MarkRootVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkRootObjectRecursively(p);
    }
  }
};


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  search_target = target;
  search_for_any_global = false;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  search_target = NULL;
  search_for_any_global = true;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
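
// Illustrative sketch (not part of the original file): these helpers are meant
// to be called ad hoc from a debugger or from temporary debugging code in a
// debug build, e.g.:
//
//   Heap::TracePathToGlobal();          // print a retention path to any global
//   Heap::TracePathToObject(suspect);   // 'suspect' is a hypothetical Object*
//
// Each call prints the chain of objects from a strong root to the target.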
#endif


static int CountTotalHolesSize() {
  int holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}


GCTracer::GCTracer()
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = Heap::SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;

  if (last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
  }
}


GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (last_gc_end_timestamp_ == 0);

  alive_after_last_gc_ = Heap::SizeOfObjects();
  last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    max_gc_pause_ = Max(max_gc_pause_, time);
    max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
    if (!first_gc) {
      min_in_mutator_ = Min(min_in_mutator_,
                            static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%d ", start_size_);
    PrintF("total_size_after=%d ", Heap::SizeOfObjects());
    PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%d ", CountTotalHolesSize());

    PrintF("allocated=%d ", allocated_since_last_gc_);
    PrintF("promoted=%d ", promoted_objects_size_);

    PrintF("\n");
  }

#if defined(ENABLE_LOGGING_AND_PROFILING)
  Heap::PrintShortHeapStatistics();
#endif
}


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return MarkCompactCollector::HasCompacted() ? "Mark-compact"
                                                  : "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return -1;
}


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (Heap::LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}
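
// Illustrative sketch (not part of the original file): how the two calls above
// cooperate. A map/name pair is hashed to a single direct-mapped slot, and
// Update() only caches symbols, so repeated keyed loads of the same property
// hit in Lookup(). The variables here are hypothetical.
//
//   int offset = KeyedLookupCache::Lookup(receiver_map, key_string);
//   if (offset == -1) {
//     offset = /* slow path: full property lookup */ 0;
//     KeyedLookupCache::Update(receiver_map, key_string, offset);
//   }
//   // offset is the in-object field offset to load from.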


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];


int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


DescriptorLookupCache::Key
DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];

int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];


#ifdef DEBUG
bool Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (Bootstrapper::IsActive()) return true;
  if (disallow_allocation_failure()) return true;
  return CollectGarbage(0, NEW_SPACE);
}
#endif


TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
    : type_(t) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}
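
// Illustrative note (not part of the original file): why an all-ones bit
// pattern works as an "empty slot" marker. Interpreted as the two halves of a
// double, the bits form a NaN payload the FPU never produces, so no real cache
// key can collide with an uninitialized entry.
//
//   uint64_t bits = (static_cast<uint64_t>(0xffffffffu) << 32) | 0xffffffffu;
//   double sentinel;
//   memcpy(&sentinel, &bits, sizeof(sentinel));
//   // sentinel != sentinel, i.e. it is a NaN.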


TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    if (Heap::InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


List<Object*> ExternalStringTable::new_space_strings_;
List<Object*> ExternalStringTable::old_space_strings_;

} } // namespace v8::internal