// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "mark-compact.h"
#include "natives.h"
#include "scanner.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif


namespace v8 {
namespace internal {


String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];


NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

static const int kMinimumPromotionLimit = 2*MB;
static const int kMinimumAllocationLimit = 8*MB;

int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
int Heap::max_semispace_size_ = 2*MB;
int Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
size_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
int Heap::max_semispace_size_ = 16*MB;
int Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
size_t Heap::code_range_size_ = 512*MB;
#else
int Heap::max_semispace_size_ = 8*MB;
int Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
size_t Heap::code_range_size_ = 0;
#endif

// The snapshot semispace size will be the default semispace size if
// snapshotting is used and will be the requested semispace size as
// set up by ConfigureHeap otherwise.
int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;

List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.

// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
int Heap::survived_since_last_expansion_ = 0;
int Heap::external_allocation_limit_ = 0;

Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;

GCTracer* Heap::tracer_ = NULL;

int Heap::unflattened_strings_length_ = 0;

int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;

int Heap::young_survivors_after_last_gc_ = 0;
int Heap::high_survival_rate_period_length_ = 0;
double Heap::survival_rate_ = 0;
Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG

int GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
int GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;

int Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


int Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}


int Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator,   used: %8d, available: %8d\n",
         MemoryAllocator::Size(),
         MemoryAllocator::Available());
  PrintF("New space,          used: %8d, available: %8d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers,       used: %8d, available: %8d, waste: %8d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space,     used: %8d, available: %8d, waste: %8d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space,         used: %8d, available: %8d, waste: %8d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space,          used: %8d, available: %8d, waste: %8d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space,         used: %8d, available: %8d, waste: %8d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8d, available: %8d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // As with the statistics before GC, we use some complicated logic to ensure
  // that NewSpace statistics are logged exactly once when --log-gc is turned
  // on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  TranscendentalCache::Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}

int Heap::SizeOfObjects() {
  int total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->Size();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(SizeOfObjects());

  Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
  Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(force_compaction);
  CollectGarbage(0, OLD_POINTER_SPACE);
  MarkCompactCollector::SetForceCompaction(false);
}


bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    GarbageCollector collector = SelectGarbageCollector(space);
    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    PerformGarbageCollection(space, collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  switch (space) {
    case NEW_SPACE:
      return new_space_.Available() >= requested_size;
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Available() >= requested_size;
    case OLD_DATA_SPACE:
      return old_data_space_->Available() >= requested_size;
    case CODE_SPACE:
      return code_space_->Available() >= requested_size;
    case MAP_SPACE:
      return map_space_->Available() >= requested_size;
    case CELL_SPACE:
      return cell_space_->Available() >= requested_size;
    case LO_SPACE:
      return lo_space_->Available() >= requested_size;
  }
  return false;
}
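
// A minimal sketch of how a caller can use the boolean result above
// (hypothetical caller, not part of this file): retry an allocation once
// the collection reports that the requested number of bytes is available
// in the target space.
//
//   Object* result = Heap::old_data_space()->AllocateRaw(size);
//   if (result->IsFailure() && Heap::CollectGarbage(size, OLD_DATA_SPACE)) {
//     result = Heap::old_data_space()->AllocateRaw(size);
//   }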


void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  SymbolTableVerifier() { }
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  Heap::symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(new_space_size, NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(code_space_size, CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(map_space_size, MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(cell_space_size, CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(large_object_size, LO_SPACE);
      gc_performed = true;
    }
  }
}
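
// Worked example of the slack computation above (illustrative figures): for
// a request of 1 MB of large-object space and 256 KB in each of the five
// paged spaces, one pass of the loop reserves 2 * 1 MB + 5 * 256 KB =
// 3.25 MB in the large object space, leaving headroom for the paged-space
// expansion that the other reservations may trigger.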


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


class ClearThreadJSFunctionResultCachesVisitor: public ThreadVisitor {
  virtual void VisitThread(ThreadLocalTop* top) {
    Context* context = top->context_;
    if (context == NULL) return;

    FixedArray* caches =
        context->global()->global_context()->jsfunction_result_caches();
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
  }
};


void Heap::ClearJSFunctionResultCaches() {
  if (Bootstrapper::IsActive()) return;
  ClearThreadJSFunctionResultCachesVisitor visitor;
  ThreadManager::IterateArchivedThreads(&visitor);
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}
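
// Worked example of the trend update above (illustrative figures): if
// 512 KB of a 4 MB new space survives a collection, survival_rate is
// 512 / 4096 * 100 = 12.5.  With a previous survival_rate_ of 40 the
// difference is 27.5; if that exceeds kYoungSurvivalRateAllowedDeviation the
// trend becomes DECREASING, a difference below its negation makes it
// INCREASING, and anything in between keeps it STABLE.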

void Heap::PerformGarbageCollection(AllocationSpace space,
                                    GarbageCollector collector,
                                    GCTracer* tracer) {
  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->Size();

  if (collector == MARK_COMPACTOR) {
    if (FLAG_flush_code) {
      // Flush all potentially unused code.
      GCTracer::Scope gc_scope(tracer, GCTracer::Scope::MC_FLUSH_CODE);
      FlushCode();
    }

    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    int old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  Counters::objs_since_last_young.Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    GlobalHandles::PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();
}
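
// Worked example of the old-generation limit update above (illustrative
// figures): after a mark-compact that leaves 30 MB of promoted data,
// old_gen_promotion_limit_ becomes 30 MB + Max(2 MB, 10 MB) = 40 MB and
// old_gen_allocation_limit_ becomes 30 MB + Max(8 MB, 15 MB) = 45 MB; if
// the survival rate has been persistently high and stable or increasing,
// both limits are doubled to postpone the next full collection.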


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactCollector::Prepare(tracer);

  bool is_compacting = MarkCompactCollector::IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  MarkCompactCollector::CollectGarbage();

  MarkCompactEpilogue(is_compacting);

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  KeyedLookupCache::Clear();
  ContextSlotCache::Clear();
  DescriptorLookupCache::Clear();

  CompilationCache::MarkCompactPrologue();

  Top::MarkCompactPrologue(is_compacting);
  ThreadManager::MarkCompactPrologue(is_compacting);

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();
}


void Heap::MarkCompactEpilogue(bool is_compacting) {
  Top::MarkCompactEpilogue(is_compacting);
  ThreadManager::MarkCompactEpilogue(is_compacting);
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = code_space_->FindObject(a);
  if (obj->IsFailure()) {
    obj = lo_space_->FindObject(a);
  }
  ASSERT(!obj->IsFailure());
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


// A queue of objects promoted during scavenge.  Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  void Initialize(Address start_address) {
    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
  }

  bool is_empty() { return front_ <= rear_; }

  void insert(HeapObject* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
    // Assert no overflow into live objects.
    ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
  }

  void remove(HeapObject** target, int* size) {
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    ASSERT(front_ >= rear_);
  }

 private:
  // The front of the queue is higher in memory than the rear.
  intptr_t* front_;
  intptr_t* rear_;
};
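
// A minimal usage sketch of the queue above (condensed from EvacuateObject
// and DoScavenge below): a promoted object is pushed together with its size
// and later popped for re-scanning, so insert() and remove() always stay
// paired.
//
//   promotion_queue.Initialize(Heap::new_space()->ToSpaceHigh());
//   promotion_queue.insert(promoted_target, object_size);
//   ...
//   HeapObject* target;
//   int size;
//   promotion_queue.remove(&target, &size);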


// Shared state read by the scavenge collector and set by ScavengeObject.
static PromotionQueue promotion_queue;


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(Heap::code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(Heap::old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  DescriptorLookupCache::Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  int survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue.Initialize(new_space_.ToSpaceHigh());

  ScavengeVisitor scavenge_visitor;
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size());

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  ExternalStringTable::Verify();

  if (ExternalStringTable::new_space_strings_.is_empty()) return;

  Object** start = &ExternalStringTable::new_space_strings_[0];
  Object** end = start + ExternalStringTable::new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(Heap::InFromSpace(*p));
    String* target = updater_func(p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (Heap::InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      ExternalStringTable::AddOldString(target);
    }
  }

  ASSERT(last <= end);
  ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
}


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      Map* map = object->map();
      int size = object->SizeFromMap(map);
      object->IterateBody(map->instance_type(), size, scavenge_visitor);
      new_space_front += size;
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue.remove(&target, &size);

      // Promoted object might be already partially visited
      // during dirty regions iteration.  Thus we search specifically
      // for pointers to from semispace instead of looking for pointers
      // to new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}
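
// A short trace of the loop above (illustrative): scanning a copied object A
// in to-space may evacuate B into to-space (advancing new_space_.top()) and
// promote C into old space (pushing it onto promotion_queue); the inner loops
// then scan B via new_space_front and re-scan C via the queue, and the outer
// do-while repeats until both frontiers are drained.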


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
static void RecordCopiedObject(HeapObject* obj) {
  bool should_record = false;
#ifdef DEBUG
  should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  should_record = should_record || FLAG_log_gc;
#endif
  if (should_record) {
    if (Heap::new_space()->Contains(obj)) {
      Heap::new_space()->RecordAllocation(obj);
    } else {
      Heap::new_space()->RecordPromotion(obj);
    }
  }
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object.  Returns the target object.
inline static HeapObject* MigrateObject(HeapObject* source,
                                        HeapObject* target,
                                        int size) {
  // Copy the content of source to target.
  Heap::CopyBlock(target->address(), source->address(), size);

  // Set the forwarding address.
  source->set_map_word(MapWord::FromForwardingAddress(target));

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Update NewSpace stats if necessary.
  RecordCopiedObject(target);
#endif
  HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));

  return target;
}
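
// A minimal sketch of how the forwarding word written above is consumed
// elsewhere in this file (see EvacuateShortcutCandidate and
// ScavengeObjectSlow): once MigrateObject has run, a later visit of the
// stale from-space copy can recover the new location without copying again.
//
//   MapWord word = source->map_word();
//   if (word.IsForwardingAddress()) {
//     *slot = word.ToForwardingAddress();
//   }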


enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
enum SizeRestriction { SMALL, UNKNOWN_SIZE };


template<ObjectContents object_contents, SizeRestriction size_restriction>
static inline void EvacuateObject(Map* map,
                                  HeapObject** slot,
                                  HeapObject* object,
                                  int object_size) {
  ASSERT((size_restriction != SMALL) ||
         (object_size <= Page::kMaxHeapObjectSize));
  ASSERT(object->Size() == object_size);

  if (Heap::ShouldBePromoted(object->address(), object_size)) {
    Object* result;

    if ((size_restriction != SMALL) &&
        (object_size > Page::kMaxHeapObjectSize)) {
      result = Heap::lo_space()->AllocateRawFixedArray(object_size);
    } else {
      if (object_contents == DATA_OBJECT) {
        result = Heap::old_data_space()->AllocateRaw(object_size);
      } else {
        result = Heap::old_pointer_space()->AllocateRaw(object_size);
      }
    }

    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      *slot = MigrateObject(object, target, object_size);

      if (object_contents == POINTER_OBJECT) {
        promotion_queue.insert(target, object_size);
      }

      Heap::tracer()->increment_promoted_objects_size(object_size);
      return;
    }
  }
  Object* result = Heap::new_space()->AllocateRaw(object_size);
  ASSERT(!result->IsFailure());
  *slot = MigrateObject(object, HeapObject::cast(result), object_size);
  return;
}


template<int object_size_in_words, ObjectContents object_contents>
static inline void EvacuateObjectOfFixedSize(Map* map,
                                             HeapObject** slot,
                                             HeapObject* object) {
  const int object_size = object_size_in_words << kPointerSizeLog2;
  EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
}


template<ObjectContents object_contents>
static inline void EvacuateObjectOfFixedSize(Map* map,
                                             HeapObject** slot,
                                             HeapObject* object) {
  int object_size = map->instance_size();
  EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
}


static inline void EvacuateFixedArray(Map* map,
                                      HeapObject** slot,
                                      HeapObject* object) {
  int object_size = FixedArray::cast(object)->FixedArraySize();
  EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
}


static inline void EvacuateByteArray(Map* map,
                                     HeapObject** slot,
                                     HeapObject* object) {
  int object_size = ByteArray::cast(object)->ByteArraySize();
  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
}


static Scavenger GetScavengerForSize(int object_size,
                                     ObjectContents object_contents) {
  ASSERT(IsAligned(object_size, kPointerSize));
  ASSERT(object_size < Page::kMaxHeapObjectSize);

  switch (object_size >> kPointerSizeLog2) {
#define CASE(n)                                             \
    case n:                                                 \
      if (object_contents == DATA_OBJECT) {                 \
        return static_cast<Scavenger>(                      \
            &EvacuateObjectOfFixedSize<n, DATA_OBJECT>);    \
      } else {                                              \
        return static_cast<Scavenger>(                      \
            &EvacuateObjectOfFixedSize<n, POINTER_OBJECT>); \
      }

    CASE(1);
    CASE(2);
    CASE(3);
    CASE(4);
    CASE(5);
    CASE(6);
    CASE(7);
    CASE(8);
    CASE(9);
    CASE(10);
    CASE(11);
    CASE(12);
    CASE(13);
    CASE(14);
    CASE(15);
    CASE(16);
    default:
      if (object_contents == DATA_OBJECT) {
        return static_cast<Scavenger>(&EvacuateObjectOfFixedSize<DATA_OBJECT>);
      } else {
        return static_cast<Scavenger>(
            &EvacuateObjectOfFixedSize<POINTER_OBJECT>);
      }

#undef CASE
  }
}
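
// Example of the dispatch above (illustrative): a pointer-containing object
// whose instance_size is 5 * kPointerSize is handled by the specialization
// EvacuateObjectOfFixedSize<5, POINTER_OBJECT>, while objects of 17 words or
// more fall through to the generic EvacuateObjectOfFixedSize<POINTER_OBJECT>,
// which reads the size from the map at scavenge time.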


static inline void EvacuateSeqAsciiString(Map* map,
                                          HeapObject** slot,
                                          HeapObject* object) {
  int object_size = SeqAsciiString::cast(object)->
      SeqAsciiStringSize(map->instance_type());
  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
}


static inline void EvacuateSeqTwoByteString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
  int object_size = SeqTwoByteString::cast(object)->
      SeqTwoByteStringSize(map->instance_type());
  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
}


static inline bool IsShortcutCandidate(int type) {
  return ((type & kShortcutTypeMask) == kShortcutTypeTag);
}


static inline void EvacuateShortcutCandidate(Map* map,
                                             HeapObject** slot,
                                             HeapObject* object) {
  ASSERT(IsShortcutCandidate(map->instance_type()));

  if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
    HeapObject* first =
        HeapObject::cast(ConsString::cast(object)->unchecked_first());

    *slot = first;

    if (!Heap::InNewSpace(first)) {
      object->set_map_word(MapWord::FromForwardingAddress(first));
      return;
    }

    MapWord first_word = first->map_word();
    if (first_word.IsForwardingAddress()) {
      HeapObject* target = first_word.ToForwardingAddress();

      *slot = target;
      object->set_map_word(MapWord::FromForwardingAddress(target));
      return;
    }

    first->map()->Scavenge(slot, first);
    object->set_map_word(MapWord::FromForwardingAddress(*slot));
    return;
  }

  int object_size = ConsString::kSize;
  EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
}
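
// Illustrative example of the shortcut above: a cons string whose second
// part is the empty string (typically the leftover of an earlier in-place
// flattening) is not copied at all; the referencing slot is rewired straight
// to the first part and the cons cell itself is left behind with a
// forwarding word, so later visitors of the same cell take the fast
// forwarding path.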
1298
1299
1300Scavenger Heap::GetScavenger(int instance_type, int instance_size) {
1301 if (instance_type < FIRST_NONSTRING_TYPE) {
1302 switch (instance_type & kStringRepresentationMask) {
1303 case kSeqStringTag:
1304 if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
1305 return &EvacuateSeqAsciiString;
1306 } else {
1307 return &EvacuateSeqTwoByteString;
1308 }
1309
1310 case kConsStringTag:
1311 if (IsShortcutCandidate(instance_type)) {
1312 return &EvacuateShortcutCandidate;
1313 } else {
1314 ASSERT(instance_size == ConsString::kSize);
1315 return GetScavengerForSize(ConsString::kSize, POINTER_OBJECT);
1316 }
1317
1318 case kExternalStringTag:
1319 ASSERT(instance_size == ExternalString::kSize);
1320 return GetScavengerForSize(ExternalString::kSize, DATA_OBJECT);
1321 }
1322 UNREACHABLE();
1323 }
1324
1325 switch (instance_type) {
1326 case BYTE_ARRAY_TYPE:
1327 return reinterpret_cast<Scavenger>(&EvacuateByteArray);
1328
1329 case FIXED_ARRAY_TYPE:
1330 return reinterpret_cast<Scavenger>(&EvacuateFixedArray);
1331
1332 case JS_OBJECT_TYPE:
1333 case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
1334 case JS_VALUE_TYPE:
1335 case JS_ARRAY_TYPE:
1336 case JS_REGEXP_TYPE:
1337 case JS_FUNCTION_TYPE:
1338 case JS_GLOBAL_PROXY_TYPE:
1339 case JS_GLOBAL_OBJECT_TYPE:
1340 case JS_BUILTINS_OBJECT_TYPE:
1341 return GetScavengerForSize(instance_size, POINTER_OBJECT);
1342
1343 case ODDBALL_TYPE:
1344 return NULL;
1345
1346 case PROXY_TYPE:
1347 return GetScavengerForSize(Proxy::kSize, DATA_OBJECT);
1348
1349 case MAP_TYPE:
1350 return NULL;
1351
1352 case CODE_TYPE:
1353 return NULL;
1354
1355 case JS_GLOBAL_PROPERTY_CELL_TYPE:
1356 return NULL;
1357
1358 case HEAP_NUMBER_TYPE:
1359 case FILLER_TYPE:
1360 case PIXEL_ARRAY_TYPE:
1361 case EXTERNAL_BYTE_ARRAY_TYPE:
1362 case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
1363 case EXTERNAL_SHORT_ARRAY_TYPE:
1364 case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
1365 case EXTERNAL_INT_ARRAY_TYPE:
1366 case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
1367 case EXTERNAL_FLOAT_ARRAY_TYPE:
1368 return GetScavengerForSize(instance_size, DATA_OBJECT);
1369
1370 case SHARED_FUNCTION_INFO_TYPE:
1371 return GetScavengerForSize(SharedFunctionInfo::kAlignedSize,
1372 POINTER_OBJECT);
1373
1374#define MAKE_STRUCT_CASE(NAME, Name, name) \
1375 case NAME##_TYPE:
1376 STRUCT_LIST(MAKE_STRUCT_CASE)
1377#undef MAKE_STRUCT_CASE
1378 return GetScavengerForSize(instance_size, POINTER_OBJECT);
1379 default:
1380 UNREACHABLE();
1381 return NULL;
1382 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001383}
1384
1385
1386void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1387 ASSERT(InFromSpace(object));
1388 MapWord first_word = object->map_word();
1389 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001390 Map* map = first_word.ToMap();
1391 map->Scavenge(p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001392}
1393
1394
1395void Heap::ScavengePointer(HeapObject** p) {
1396 ScavengeObject(p, *p);
1397}
1398
1399
1400Object* Heap::AllocatePartialMap(InstanceType instance_type,
1401 int instance_size) {
1402 Object* result = AllocateRawMap();
1403 if (result->IsFailure()) return result;
1404
1405 // Map::cast cannot be used due to uninitialized map field.
1406 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1407 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1408 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001409 reinterpret_cast<Map*>(result)->
1410 set_scavenger(GetScavenger(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001411 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001412 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001413 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001414 reinterpret_cast<Map*>(result)->set_bit_field(0);
1415 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001416 return result;
1417}
1418
1419
1420Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1421 Object* result = AllocateRawMap();
1422 if (result->IsFailure()) return result;
1423
1424 Map* map = reinterpret_cast<Map*>(result);
1425 map->set_map(meta_map());
1426 map->set_instance_type(instance_type);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001427 map->set_scavenger(GetScavenger(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001428 map->set_prototype(null_value());
1429 map->set_constructor(null_value());
1430 map->set_instance_size(instance_size);
1431 map->set_inobject_properties(0);
1432 map->set_pre_allocated_property_fields(0);
1433 map->set_instance_descriptors(empty_descriptor_array());
1434 map->set_code_cache(empty_fixed_array());
1435 map->set_unused_property_fields(0);
1436 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001437 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001438
1439 // If the map object is aligned fill the padding area with Smi 0 objects.
1440 if (Map::kPadStart < Map::kSize) {
1441 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1442 0,
1443 Map::kSize - Map::kPadStart);
1444 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001445 return map;
1446}


Object* Heap::AllocateCodeCache() {
  Object* result = AllocateStruct(CODE_CACHE_TYPE);
  if (result->IsFailure()) return result;
  CodeCache* code_cache = CodeCache::cast(result);
  code_cache->set_default_cache(empty_fixed_array());
  code_cache->set_normal_type_cache(undefined_value());
  return code_cache;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
  {contents, k##name##RootIndex},
  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};


bool Heap::CreateInitialMaps() {
  Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
  if (obj->IsFailure()) return false;
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
  if (obj->IsFailure()) return false;
  set_fixed_array_map(Map::cast(obj));

  obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
  if (obj->IsFailure()) return false;
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  set_empty_fixed_array(FixedArray::cast(obj));

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  set_null_value(obj);

  // Allocate the empty descriptor array.
  obj = AllocateEmptyFixedArray();
  if (obj->IsFailure()) return false;
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_instance_descriptors(empty_descriptor_array());
  meta_map()->set_code_cache(empty_fixed_array());

  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
  fixed_array_map()->set_code_cache(empty_fixed_array());

  oddball_map()->set_instance_descriptors(empty_descriptor_array());
  oddball_map()->set_code_cache(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
  if (obj->IsFailure()) return false;
  set_heap_number_map(Map::cast(obj));

  obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
  if (obj->IsFailure()) return false;
  set_proxy_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    obj = AllocateMap(entry.type, entry.size);
    if (obj->IsFailure()) return false;
    roots_[entry.index] = Map::cast(obj);
  }

  obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_byte_array_map(Map::cast(obj));

  obj = AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_pixel_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_byte_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_unsigned_byte_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_short_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_unsigned_short_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_int_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_unsigned_int_array_map(Map::cast(obj));

  obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
                    ExternalArray::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_external_float_array_map(Map::cast(obj));

  obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
  if (obj->IsFailure()) return false;
  set_code_map(Map::cast(obj));

  obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
                    JSGlobalPropertyCell::kSize);
  if (obj->IsFailure()) return false;
  set_global_property_cell_map(Map::cast(obj));

  obj = AllocateMap(FILLER_TYPE, kPointerSize);
  if (obj->IsFailure()) return false;
  set_one_pointer_filler_map(Map::cast(obj));

  obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
  if (obj->IsFailure()) return false;
  set_two_pointer_filler_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    obj = AllocateMap(entry.type, entry.size);
    if (obj->IsFailure()) return false;
    roots_[entry.index] = Map::cast(obj);
  }

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  set_hash_table_map(Map::cast(obj));

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  set_context_map(Map::cast(obj));

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  set_catch_context_map(Map::cast(obj));

  obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  set_global_context_map(Map::cast(obj));

  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                    SharedFunctionInfo::kAlignedSize);
  if (obj->IsFailure()) return false;
  set_shared_function_info_map(Map::cast(obj));

  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
  return true;
}


Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

  Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


Object* Heap::AllocateHeapNumber(double value) {
  // Use general version, if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, TENURED);

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}
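
// For orientation: the two AllocateHeapNumber() overloads above differ only
// in where the number lands. A minimal usage sketch (values chosen here are
// arbitrary):
//
//   Object* tenured = Heap::AllocateHeapNumber(-0.0, TENURED);  // old data
//   Object* young = Heap::AllocateHeapNumber(3.14);             // new space
//   if (tenured->IsFailure() || young->IsFailure()) {
//     // Allocation can fail; callers must check before casting.
//   }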


Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
  Object* result = AllocateRawCell();
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(global_property_cell_map());
  JSGlobalPropertyCell::cast(result)->set_value(value);
  return result;
}


Object* Heap::CreateOddball(const char* to_string,
                            Object* to_number) {
  Object* result = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (result->IsFailure()) return result;
  return Oddball::cast(result)->Initialize(to_string, to_number);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
  if (obj->IsFailure()) return false;
  set_neander_map(Map::cast(obj));

  obj = Heap::AllocateJSObjectFromMap(neander_map());
  if (obj->IsFailure()) return false;
  Object* elements = AllocateFixedArray(2);
  if (elements->IsFailure()) return false;
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}


void Heap::CreateCEntryStub() {
  CEntryStub stub(1);
  set_c_entry_code(*stub.GetCode());
}


#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
void Heap::CreateRegExpCEntryStub() {
  RegExpCEntryStub stub;
  set_re_c_entry_code(*stub.GetCode());
}
#endif


void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode());
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode());
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookups in the
  // stub cache for these stubs.
  HandleScope scope;
  // gcc-4.4 has a problem generating correct code for the following snippet:
  // { CEntryStub stub;
  //   c_entry_code_ = *stub.GetCode();
  // }
  // { DebuggerStatementStub stub;
  //   debugger_statement_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateCEntryStub();
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
  Heap::CreateRegExpCEntryStub();
#endif
}


bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  obj = AllocateHeapNumber(-0.0, TENURED);
  if (obj->IsFailure()) return false;
  set_minus_zero_value(obj);
  ASSERT(signbit(minus_zero_value()->Number()) != 0);

  obj = AllocateHeapNumber(OS::nan_value(), TENURED);
  if (obj->IsFailure()) return false;
  set_nan_value(obj);

  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
  if (obj->IsFailure()) return false;
  set_undefined_value(obj);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate initial symbol table.
  obj = SymbolTable::Allocate(kInitialSymbolTableSize);
  if (obj->IsFailure()) return false;
  // Don't use set_symbol_table() due to asserts.
  roots_[kSymbolTableRootIndex] = obj;

  // Assign the print strings for oddballs after creating the symbol table.
  Object* symbol = LookupAsciiSymbol("undefined");
  if (symbol->IsFailure()) return false;
  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value())->set_to_number(nan_value());

  // Allocate the null_value.
  obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
  if (obj->IsFailure()) return false;

  obj = CreateOddball("true", Smi::FromInt(1));
  if (obj->IsFailure()) return false;
  set_true_value(obj);

  obj = CreateOddball("false", Smi::FromInt(0));
  if (obj->IsFailure()) return false;
  set_false_value(obj);

  obj = CreateOddball("hole", Smi::FromInt(-1));
  if (obj->IsFailure()) return false;
  set_the_hole_value(obj);

  obj = CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
  if (obj->IsFailure()) return false;
  set_no_interceptor_result_sentinel(obj);

  obj = CreateOddball("termination_exception", Smi::FromInt(-3));
  if (obj->IsFailure()) return false;
  set_termination_exception(obj);

  // Allocate the empty string.
  obj = AllocateRawAsciiString(0, TENURED);
  if (obj->IsFailure()) return false;
  set_empty_string(String::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
    obj = LookupAsciiSymbol(constant_symbol_table[i].contents);
    if (obj->IsFailure()) return false;
    roots_[constant_symbol_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
  if (obj->IsFailure()) return false;
  hidden_symbol_ = String::cast(obj);

  // Allocate the proxy for __proto__.
  obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
  if (obj->IsFailure()) return false;
  set_prototype_accessors(Proxy::cast(obj));

  // Allocate the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  obj = NumberDictionary::Allocate(128);
  if (obj->IsFailure()) return false;
  set_code_stubs(NumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  obj = NumberDictionary::Allocate(64);
  if (obj->IsFailure()) return false;
  set_non_monomorphic_cache(NumberDictionary::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  if (InitializeNumberStringCache()->IsFailure()) return false;

  // Allocate cache for single character ASCII strings.
  obj = AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
  if (obj->IsFailure()) return false;
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate cache for external strings pointing to native source code.
  obj = AllocateFixedArray(Natives::GetBuiltinsCount());
  if (obj->IsFailure()) return false;
  set_natives_source_cache(FixedArray::cast(obj));

  // Handling of script id generation is in Factory::NewScript.
  set_last_script_id(undefined_value());

  // Initialize keyed lookup cache.
  KeyedLookupCache::Clear();

  // Initialize context slot cache.
  ContextSlotCache::Clear();

  // Initialize descriptor cache.
  DescriptorLookupCache::Clear();

  // Initialize compilation cache.
  CompilationCache::Clear();

  return true;
}


Object* Heap::InitializeNumberStringCache() {
  // Compute the size of the number string cache based on the max heap size.
  // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
  // max_semispace_size_ ==   8 MB => number_string_cache_size = 16KB.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
  Object* obj = AllocateFixedArray(number_string_cache_size * 2, TENURED);
  if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
  return obj;
}
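
// Worked example for the sizing above, assuming a hypothetical
// max_semispace_size_ of 4 MB (not one of the documented endpoints):
//
//   number_string_cache_size = 4 * MB / 512;                 // 8192
//   number_string_cache_size = Max(32, Min(16 * KB, 8192));  // still 8192
//   // The backing FixedArray stores (key, value) pairs, so it is allocated
//   // with 2 * 8192 slots, tenured so it survives scavenges.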


void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(i);
  }
}


static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}


static inline int smi_get_hash(Smi* smi) {
  return smi->value();
}


Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  Object* key = number_string_cache()->get(hash * 2);
  if (key == number) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  }
  return undefined_value();
}
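
// For orientation: the number-string cache is a flat FixedArray of
// (key, value) pairs, so logical entry `hash` occupies slots 2 * hash and
// 2 * hash + 1. Since the array length is twice the entry count and the
// entry count is a power of two, masking with (length >> 1) - 1 is the same
// as hash % entry_count. A hypothetical lookup of the Smi 42 in a cache with
// 4096 entries:
//
//   int mask = (number_string_cache()->length() >> 1) - 1;     // 4095
//   int hash = smi_get_hash(Smi::FromInt(42)) & mask;          // 42
//   Object* key = number_string_cache()->get(hash * 2);        // slot 84
//   Object* value = number_string_cache()->get(hash * 2 + 1);  // slot 85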


void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
    number_string_cache()->set(hash * 2, Smi::cast(number));
  } else {
    hash = double_get_hash(number->Number()) & mask;
    number_string_cache()->set(hash * 2, number);
  }
  number_string_cache()->set(hash * 2 + 1, string);
}


Object* Heap::NumberToString(Object* number, bool check_number_string_cache) {
  Counters::number_to_string_runtime.Increment();
  if (check_number_string_cache) {
    Object* cached = GetNumberStringCache(number);
    if (cached != undefined_value()) {
      return cached;
    }
  }

  char arr[100];
  Vector<char> buffer(arr, ARRAY_SIZE(arr));
  const char* str;
  if (number->IsSmi()) {
    int num = Smi::cast(number)->value();
    str = IntToCString(num, buffer);
  } else {
    double num = HeapNumber::cast(number)->value();
    str = DoubleToCString(num, buffer);
  }
  Object* result = AllocateStringFromAscii(CStrVector(str));

  if (!result->IsFailure()) {
    SetNumberStringCache(number, String::cast(result));
  }
  return result;
}


Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}


Heap::RootListIndex Heap::RootIndexForExternalArrayType(
    ExternalArrayType array_type) {
  switch (array_type) {
    case kExternalByteArray:
      return kExternalByteArrayMapRootIndex;
    case kExternalUnsignedByteArray:
      return kExternalUnsignedByteArrayMapRootIndex;
    case kExternalShortArray:
      return kExternalShortArrayMapRootIndex;
    case kExternalUnsignedShortArray:
      return kExternalUnsignedShortArrayMapRootIndex;
    case kExternalIntArray:
      return kExternalIntArrayMapRootIndex;
    case kExternalUnsignedIntArray:
      return kExternalUnsignedIntArrayMapRootIndex;
    case kExternalFloatArray:
      return kExternalFloatArrayMapRootIndex;
    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}


Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int. Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
  static const DoubleRepresentation minus_zero(-0.0);

  DoubleRepresentation rep(value);
  if (rep.bits == minus_zero.bits) {
    return AllocateHeapNumber(-0.0, pretenure);
  }

  int int_value = FastD2I(value);
  if (value == int_value && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}
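
// Note on the minus-zero check above: -0.0 == 0.0 compares true under IEEE
// 754, so the sign is only visible in the bit pattern. A rough sketch:
//
//   DoubleRepresentation plus_zero(0.0);    // bits 0x0000000000000000
//   DoubleRepresentation minus_zero(-0.0);  // bits 0x8000000000000000
//   // plus_zero.bits != minus_zero.bits even though 0.0 == -0.0, which is
//   // why -0.0 must be materialized as a HeapNumber instead of Smi 0.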


Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate proxies in paged spaces.
  STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result = Allocate(proxy_map(), space);
  if (result->IsFailure()) return result;

  Proxy::cast(result)->set_proxy(proxy);
  return result;
}


Object* Heap::AllocateSharedFunctionInfo(Object* name) {
  Object* result = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
  if (result->IsFailure()) return result;

  SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
  share->set_name(name);
  Code* illegal = Builtins::builtin(Builtins::Illegal);
  share->set_code(illegal);
  share->set_scope_info(SerializedScopeInfo::Empty());
  Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
  share->set_construct_stub(construct_stub);
  share->set_expected_nof_properties(0);
  share->set_length(0);
  share->set_formal_parameter_count(0);
  share->set_instance_class_name(Object_symbol());
  share->set_function_data(undefined_value());
  share->set_script(undefined_value());
  share->set_start_position_and_type(0);
  share->set_debug_info(undefined_value());
  share->set_inferred_name(empty_string());
  share->set_compiler_hints(0);
  share->set_this_property_assignments_count(0);
  share->set_this_property_assignments(undefined_value());
  share->set_num_literals(0);
  share->set_end_position(0);
  share->set_function_token_position(0);
  return result;
}


// Returns true for a character in a range. Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  // This makes use of the unsigned wraparound.
  return character - from <= to - from;
}


static inline Object* MakeOrFindTwoCharacterString(uint32_t c1, uint32_t c2) {
  String* symbol;
  // Numeric strings have a different hash algorithm not known by
  // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
      Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
    return symbol;
  // Now we know the length is 2, we might as well make use of that fact
  // when building the new string.
  } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) {  // We can do this
    ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1));  // because of this.
    Object* result = Heap::AllocateRawAsciiString(2);
    if (result->IsFailure()) return result;
    char* dest = SeqAsciiString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  } else {
    Object* result = Heap::AllocateRawTwoByteString(2);
    if (result->IsFailure()) return result;
    uc16* dest = SeqTwoByteString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  }
}
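
// Worked example for Between(): the two comparisons from <= c && c <= to are
// folded into one unsigned subtraction. With from == '0' (48) and to == '9'
// (57):
//
//   Between('5', '0', '9');  // '5' - '0' == 5,  5 <= 9  -> true
//   Between('a', '0', '9');  // 'a' - '0' == 49, 49 <= 9 -> false
//   Between('.', '0', '9');  // '.' - '0' wraps around to 0xFFFFFFFE,
//                            // which is far greater than 9 -> false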


Object* Heap::AllocateConsString(String* first, String* second) {
  int first_length = first->length();
  if (first_length == 0) {
    return second;
  }

  int second_length = second->length();
  if (second_length == 0) {
    return first;
  }

  int length = first_length + second_length;

  // Optimization for 2-byte strings often used as keys in a decompression
  // dictionary. Check whether we already have the string in the symbol
  // table to prevent creation of many unnecessary strings.
  if (length == 2) {
    unsigned c1 = first->Get(0);
    unsigned c2 = second->Get(0);
    return MakeOrFindTwoCharacterString(c1, c2);
  }

  bool first_is_ascii = first->IsAsciiRepresentation();
  bool second_is_ascii = second->IsAsciiRepresentation();
  bool is_ascii = first_is_ascii && second_is_ascii;

  // Make sure that an out of memory exception is thrown if the length
  // of the new cons string is too large.
  if (length > String::kMaxLength || length < 0) {
    Top::context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  bool is_ascii_data_in_two_byte_string = false;
  if (!is_ascii) {
    // At least one of the strings uses two-byte representation so we
    // can't use the fast case code for short ASCII strings below, but
    // we can try to save memory if all chars actually fit in ASCII.
    is_ascii_data_in_two_byte_string =
        first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
    if (is_ascii_data_in_two_byte_string) {
      Counters::string_add_runtime_ext_to_ascii.Increment();
    }
  }

  // If the resulting string is small make a flat string.
  if (length < String::kMinNonFlatLength) {
    ASSERT(first->IsFlat());
    ASSERT(second->IsFlat());
    if (is_ascii) {
      Object* result = AllocateRawAsciiString(length);
      if (result->IsFailure()) return result;
      // Copy the characters into the new object.
      char* dest = SeqAsciiString::cast(result)->GetChars();
      // Copy first part.
      const char* src;
      if (first->IsExternalString()) {
        src = ExternalAsciiString::cast(first)->resource()->data();
      } else {
        src = SeqAsciiString::cast(first)->GetChars();
      }
      for (int i = 0; i < first_length; i++) *dest++ = src[i];
      // Copy second part.
      if (second->IsExternalString()) {
        src = ExternalAsciiString::cast(second)->resource()->data();
      } else {
        src = SeqAsciiString::cast(second)->GetChars();
      }
      for (int i = 0; i < second_length; i++) *dest++ = src[i];
      return result;
    } else {
      if (is_ascii_data_in_two_byte_string) {
        Object* result = AllocateRawAsciiString(length);
        if (result->IsFailure()) return result;
        // Copy the characters into the new object.
        char* dest = SeqAsciiString::cast(result)->GetChars();
        String::WriteToFlat(first, dest, 0, first_length);
        String::WriteToFlat(second, dest + first_length, 0, second_length);
        return result;
      }

      Object* result = AllocateRawTwoByteString(length);
      if (result->IsFailure()) return result;
      // Copy the characters into the new object.
      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
      String::WriteToFlat(first, dest, 0, first_length);
      String::WriteToFlat(second, dest + first_length, 0, second_length);
      return result;
    }
  }

  Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
      cons_ascii_string_map() : cons_string_map();

  Object* result = Allocate(map, NEW_SPACE);
  if (result->IsFailure()) return result;

  AssertNoAllocation no_gc;
  ConsString* cons_string = ConsString::cast(result);
  WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
  cons_string->set_length(length);
  cons_string->set_hash_field(String::kEmptyHashField);
  cons_string->set_first(first, mode);
  cons_string->set_second(second, mode);
  return result;
}
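
// For orientation, a rough summary of the decision order in
// AllocateConsString() above:
//
//   either operand empty           -> return the other operand
//   length == 2                    -> reuse or build a two-character symbol
//   length > String::kMaxLength    -> throw out-of-memory
//   length < kMinNonFlatLength     -> copy both parts into a flat string
//   otherwise                      -> allocate a ConsString that just points
//                                     at the two halves (no copying)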


Object* Heap::AllocateSubString(String* buffer,
                                int start,
                                int end,
                                PretenureFlag pretenure) {
  int length = end - start;

  if (length == 1) {
    return Heap::LookupSingleCharacterStringFromCode(
        buffer->Get(start));
  } else if (length == 2) {
    // Optimization for 2-byte strings often used as keys in a decompression
    // dictionary. Check whether we already have the string in the symbol
    // table to prevent creation of many unnecessary strings.
    unsigned c1 = buffer->Get(start);
    unsigned c2 = buffer->Get(start + 1);
    return MakeOrFindTwoCharacterString(c1, c2);
  }

  // Make an attempt to flatten the buffer to reduce access time.
  buffer = buffer->TryFlattenGetString();

  Object* result = buffer->IsAsciiRepresentation()
      ? AllocateRawAsciiString(length, pretenure)
      : AllocateRawTwoByteString(length, pretenure);
  if (result->IsFailure()) return result;
  String* string_result = String::cast(result);
  // Copy the characters into the new object.
  if (buffer->IsAsciiRepresentation()) {
    ASSERT(string_result->IsAsciiRepresentation());
    char* dest = SeqAsciiString::cast(string_result)->GetChars();
    String::WriteToFlat(buffer, dest, start, end);
  } else {
    ASSERT(string_result->IsTwoByteRepresentation());
    uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
    String::WriteToFlat(buffer, dest, start, end);
  }

  return result;
}


Object* Heap::AllocateExternalStringFromAscii(
    ExternalAsciiString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    Top::context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  Map* map = external_ascii_string_map();
  Object* result = Allocate(map, NEW_SPACE);
  if (result->IsFailure()) return result;

  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


Object* Heap::AllocateExternalStringFromTwoByte(
    ExternalTwoByteString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    Top::context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  // For small strings we check whether the resource contains only
  // ASCII characters. If yes, we use a different string map.
  bool is_ascii = true;
  if (length >= static_cast<size_t>(String::kMinNonFlatLength)) {
    is_ascii = false;
  } else {
    const uc16* data = resource->data();
    for (size_t i = 0; i < length; i++) {
      if (data[i] > String::kMaxAsciiCharCode) {
        is_ascii = false;
        break;
      }
    }
  }

  Map* map = is_ascii ?
      Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
  Object* result = Allocate(map, NEW_SPACE);
  if (result->IsFailure()) return result;

  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
  if (code <= String::kMaxAsciiCharCode) {
    Object* value = Heap::single_character_string_cache()->get(code);
    if (value != Heap::undefined_value()) return value;

    char buffer[1];
    buffer[0] = static_cast<char>(code);
    Object* result = LookupSymbol(Vector<const char>(buffer, 1));

    if (result->IsFailure()) return result;
    Heap::single_character_string_cache()->set(code, result);
    return result;
  }

  Object* result = Heap::AllocateRawTwoByteString(1);
  if (result->IsFailure()) return result;
  String* answer = String::cast(result);
  answer->Set(0, code);
  return answer;
}


Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  if (pretenure == NOT_TENURED) {
    return AllocateByteArray(length);
  }
  int size = ByteArray::SizeFor(length);
  Object* result = (size <= MaxObjectSizeInPagedSpace())
      ? old_data_space_->AllocateRaw(size)
      : lo_space_->AllocateRaw(size);
  if (result->IsFailure()) return result;

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


Object* Heap::AllocateByteArray(int length) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


void Heap::CreateFillerObjectAt(Address addr, int size) {
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map(one_pointer_filler_map());
  } else if (size == 2 * kPointerSize) {
    filler->set_map(two_pointer_filler_map());
  } else {
    filler->set_map(byte_array_map());
    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
  }
}
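
// For orientation: the filler map is chosen purely from the size of the hole
// being plugged, which keeps the heap iterable. A sketch with made-up sizes:
//
//   CreateFillerObjectAt(addr, kPointerSize);      // one-pointer filler map
//   CreateFillerObjectAt(addr, 2 * kPointerSize);  // two-pointer filler map
//   CreateFillerObjectAt(addr, 64);                // byte-array shaped filler
//                                                  // whose length accounts
//                                                  // for the header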


Object* Heap::AllocatePixelArray(int length,
                                 uint8_t* external_pointer,
                                 PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;

  reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
  reinterpret_cast<PixelArray*>(result)->set_length(length);
  reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);

  return result;
}


Object* Heap::AllocateExternalArray(int length,
                                    ExternalArrayType array_type,
                                    void* external_pointer,
                                    PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result = AllocateRaw(ExternalArray::kAlignedSize,
                               space,
                               OLD_DATA_SPACE);
  if (result->IsFailure()) return result;

  reinterpret_cast<ExternalArray*>(result)->set_map(
      MapForExternalArrayType(array_type));
  reinterpret_cast<ExternalArray*>(result)->set_length(length);
  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
      external_pointer);

  return result;
}


// This visitor is used to traverse all the archived threads to see if
// there are activations on any of the stacks corresponding to the code.
class FlushingStackVisitor : public ThreadVisitor {
 public:
  explicit FlushingStackVisitor(Code* code) : found_(false), code_(code) {}

  void VisitThread(ThreadLocalTop* top) {
    // If we already found the code in a previously traversed thread we return.
    if (found_) return;

    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
      if (code_->contains(it.frame()->pc())) {
        found_ = true;
        return;
      }
    }
  }
  bool FoundCode() { return found_; }

 private:
  bool found_;
  Code* code_;
};


static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
  // The function must be compiled and have the source code available,
  // to be able to recompile it in case we need the function again.
  if (!(function_info->is_compiled() && function_info->HasSourceCode())) return;

  // We never flush code for API functions.
  if (function_info->IsApiFunction()) return;

  // Only flush code for functions.
  if (function_info->code()->kind() != Code::FUNCTION) return;

  // The function must be lazily compilable.
  if (!function_info->allows_lazy_compilation()) return;

  // If this is a full script wrapped in a function we do not flush the code.
  if (function_info->is_toplevel()) return;

  // If this function is in the compilation cache we do not flush the code.
  if (CompilationCache::HasFunction(function_info)) return;

  // Make sure we are not referencing the code from the stack.
  for (StackFrameIterator it; !it.done(); it.Advance()) {
    if (function_info->code()->contains(it.frame()->pc())) return;
  }
  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  FlushingStackVisitor threadvisitor(function_info->code());
  ThreadManager::IterateArchivedThreads(&threadvisitor);
  if (threadvisitor.FoundCode()) return;

  // Compute the lazy compilable version of the code.
  HandleScope scope;
  function_info->set_code(*ComputeLazyCompile(function_info->length()));
}


void Heap::FlushCode() {
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Do not flush code if the debugger is loaded or there are breakpoints.
  if (Debug::IsLoaded() || Debug::has_break_points()) return;
#endif
  HeapObjectIterator it(old_pointer_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    if (obj->IsJSFunction()) {
      JSFunction* jsfunction = JSFunction::cast(obj);

      // The function must have a valid context and not be a builtin.
      if (jsfunction->unchecked_context()->IsContext() &&
          !jsfunction->IsBuiltin()) {
        FlushCodeForFunction(jsfunction->shared());
      }
    }
  }
}
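
// For orientation: code for a function is flushed only when every guard in
// FlushCodeForFunction() passes, roughly
//
//   compiled && has source && !API function && kind == Code::FUNCTION
//     && allows lazy compilation && !top-level script wrapper
//     && not in the compilation cache
//     && not referenced from the current stack or any archived thread
//
// in which case the code is replaced by the lazy-compile stub and will be
// regenerated on the next call.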


Object* Heap::CreateCode(const CodeDesc& desc,
                         Code::Flags flags,
                         Handle<Object> self_reference) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  Object* reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
  if (reloc_info->IsFailure()) return reloc_info;

  // Compute size.
  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
  int obj_size = Code::SizeFor(body_size);
  ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
  Object* result;
  if (obj_size > MaxObjectSizeInPagedSpace()) {
    result = lo_space_->AllocateRawCode(obj_size);
  } else {
    result = code_space_->AllocateRaw(obj_size);
  }

  if (result->IsFailure()) return result;

  // Initialize the object.
  HeapObject::cast(result)->set_map(code_map());
  Code* code = Code::cast(result);
  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_info(ByteArray::cast(reloc_info));
  code->set_flags(flags);
  // Allow self references to created code object by patching the handle to
  // point to the newly allocated Code object.
  if (!self_reference.is_null()) {
    *(self_reference.location()) = code;
  }
  // Migrate generated code.
  // The generated code can contain Object** values (typically from handles)
  // that are dereferenced during the copy to point directly to the actual heap
  // objects. These pointers can include references to the code object itself,
  // through the self_reference parameter.
  code->CopyFrom(desc);

#ifdef DEBUG
  code->Verify();
#endif
  return code;
}
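
// Worked example for the size computation above, using made-up numbers: for
// a CodeDesc with instr_size == 70 and an 8-byte kObjectAlignment,
//
//   int body_size = RoundUp(70, kObjectAlignment);  // 72
//   int obj_size = Code::SizeFor(body_size);        // header + 72; the ASSERT
//                                                   // checks the total is
//                                                   // kCodeAlignment-aligned
//
// Objects larger than MaxObjectSizeInPagedSpace() go to the large object
// space; everything else is carved out of the dedicated code space.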


Object* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  Object* result;
  if (obj_size > MaxObjectSizeInPagedSpace()) {
    result = lo_space_->AllocateRawCode(obj_size);
  } else {
    result = code_space_->AllocateRaw(obj_size);
  }

  if (result->IsFailure()) return result;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  CopyBlock(new_addr, old_addr, obj_size);
  // Relocate the copy.
  Code* new_code = Code::cast(result);
  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}


Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  Object* reloc_info_array = AllocateByteArray(reloc_info.length(), TENURED);
  if (reloc_info_array->IsFailure()) return reloc_info_array;

  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);

  int new_obj_size = Code::SizeFor(new_body_size);

  Address old_addr = code->address();

  size_t relocation_offset =
      static_cast<size_t>(code->instruction_end() - old_addr);

  Object* result;
  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
    result = lo_space_->AllocateRawCode(new_obj_size);
  } else {
    result = code_space_->AllocateRaw(new_obj_size);
  }

  if (result->IsFailure()) return result;

  // Copy code object.
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();

  // Copy header and instructions.
  memcpy(new_addr, old_addr, relocation_offset);

  Code* new_code = Code::cast(result);
  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));

  // Copy patched rinfo.
  memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());

  // Relocate the copy.
  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
  new_code->Relocate(new_addr - old_addr);

#ifdef DEBUG
  code->Verify();
#endif
  return new_code;
}


Object* Heap::Allocate(Map* map, AllocationSpace space) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
  // If allocation failures are disallowed, we may allocate in a different
  // space when new space is full and the object is not a large object.
  AllocationSpace retry_space =
      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
  Object* result =
      AllocateRaw(map->instance_size(), space, retry_space);
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(map);
#ifdef ENABLE_LOGGING_AND_PROFILING
  ProducerHeapProfile::RecordJSObjectAllocation(result);
#endif
  return result;
}


Object* Heap::InitializeFunction(JSFunction* function,
                                 SharedFunctionInfo* shared,
                                 Object* prototype) {
  ASSERT(!prototype->IsMap());
  function->initialize_properties();
  function->initialize_elements();
  function->set_shared(shared);
  function->set_prototype_or_initial_map(prototype);
  function->set_context(undefined_value());
  function->set_literals(empty_fixed_array());
  return function;
}


Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
  // Allocate the prototype. Make sure to use the object function
  // from the function's context, since the function can be from a
  // different context.
  JSFunction* object_function =
      function->context()->global_context()->object_function();
  Object* prototype = AllocateJSObject(object_function);
  if (prototype->IsFailure()) return prototype;
  // When creating the prototype for the function we must set its
  // constructor to the function.
  Object* result =
      JSObject::cast(prototype)->SetProperty(constructor_symbol(),
                                             function,
                                             DONT_ENUM);
  if (result->IsFailure()) return result;
  return prototype;
}


Object* Heap::AllocateFunction(Map* function_map,
                               SharedFunctionInfo* shared,
                               Object* prototype,
                               PretenureFlag pretenure) {
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  Object* result = Allocate(function_map, space);
  if (result->IsFailure()) return result;
  return InitializeFunction(JSFunction::cast(result), shared, prototype);
}


Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
  // To get fast allocation and map sharing for arguments objects we
  // allocate them based on an arguments boilerplate.

  // This calls Copy directly rather than using Heap::AllocateRaw so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  JSObject* boilerplate =
      Top::context()->global_context()->arguments_boilerplate();

  // Check that the size of the boilerplate matches our
  // expectations. The ArgumentsAccessStub::GenerateNewObject relies
  // on the size being a known constant.
  ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());

  // Do the allocation.
  Object* result =
      AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
  if (result->IsFailure()) return result;

  // Copy the content. The arguments boilerplate doesn't have any
  // fields that point to new space so it's safe to skip the write
  // barrier here.
  CopyBlock(HeapObject::cast(result)->address(),
            boilerplate->address(),
            kArgumentsObjectSize);

  // Set the two properties.
  JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
                                                callee);
  JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
                                                Smi::FromInt(length),
                                                SKIP_WRITE_BARRIER);

  // Check the state of the object.
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastElements());

  return result;
}


Object* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map with the size and number of in-object properties
  // suggested by the function.
  int instance_size = fun->shared()->CalculateInstanceSize();
  int in_object_properties = fun->shared()->CalculateInObjectProperties();
  Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
  if (map_obj->IsFailure()) return map_obj;

  // Fetch or allocate prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    prototype = AllocateFunctionPrototype(fun);
    if (prototype->IsFailure()) return prototype;
  }
  Map* map = Map::cast(map_obj);
  map->set_inobject_properties(in_object_properties);
  map->set_unused_property_fields(in_object_properties);
  map->set_prototype(prototype);
  ASSERT(map->has_fast_elements());

  // If the function has only simple this property assignments add
  // field descriptors for these to the initial map as the object
  // cannot be constructed without having these properties. Guard by
  // the inline_new flag so we only change the map if we generate a
  // specialized construct stub.
  ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
  if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
    int count = fun->shared()->this_property_assignments_count();
    if (count > in_object_properties) {
      count = in_object_properties;
    }
    Object* descriptors_obj = DescriptorArray::Allocate(count);
    if (descriptors_obj->IsFailure()) return descriptors_obj;
    DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
    for (int i = 0; i < count; i++) {
      String* name = fun->shared()->GetThisPropertyAssignmentName(i);
      ASSERT(name->IsSymbol());
      FieldDescriptor field(name, i, NONE);
      field.SetEnumerationIndex(i);
      descriptors->Set(i, &field);
    }
    descriptors->SetNextEnumerationIndex(count);
    descriptors->Sort();
    map->set_instance_descriptors(descriptors);
    map->set_pre_allocated_property_fields(count);
    map->set_unused_property_fields(in_object_properties - count);
  }
  return map;
}
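
// Illustrative note: for a constructor whose body only performs simple
// `this.x = ...` assignments, the initial map pre-allocates those fields
// in-object. A hypothetical function with three such assignments and room
// for four in-object properties ends up with
//
//   map->inobject_properties()           == 4
//   map->pre_allocated_property_fields() == 3
//   map->unused_property_fields()        == 4 - 3 == 1
//
// so those first three properties never need the out-of-object backing store.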
2788
2789
2790void Heap::InitializeJSObjectFromMap(JSObject* obj,
2791 FixedArray* properties,
2792 Map* map) {
2793 obj->set_properties(properties);
2794 obj->initialize_elements();
2795 // TODO(1240798): Initialize the object's body using valid initial values
2796 // according to the object's initial map. For example, if the map's
2797 // instance type is JS_ARRAY_TYPE, the length field should be initialized
2798 // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
2799 // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
2800 // verification code has to cope with (temporarily) invalid objects. See
2801 // for example, JSArray::JSArrayVerify).
2802 obj->InitializeBody(map->instance_size());
2803}
2804
2805
2806Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
2807 // JSFunctions should be allocated using AllocateFunction to be
2808 // properly initialized.
2809 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
2810
Steve Block8defd9f2010-07-08 12:39:36 +01002811 // Both types of global objects should be allocated using
2812 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00002813 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
2814 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
2815
2816 // Allocate the backing storage for the properties.
2817 int prop_size =
2818 map->pre_allocated_property_fields() +
2819 map->unused_property_fields() -
2820 map->inobject_properties();
2821 ASSERT(prop_size >= 0);
2822 Object* properties = AllocateFixedArray(prop_size, pretenure);
2823 if (properties->IsFailure()) return properties;
2824
2825 // Allocate the JSObject.
2826 AllocationSpace space =
2827 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2828 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
2829 Object* obj = Allocate(map, space);
2830 if (obj->IsFailure()) return obj;
2831
2832 // Initialize the JSObject.
2833 InitializeJSObjectFromMap(JSObject::cast(obj),
2834 FixedArray::cast(properties),
2835 map);
Steve Block8defd9f2010-07-08 12:39:36 +01002836 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00002837 return obj;
2838}
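// Worked example (numbers are illustrative): for a map with
// pre_allocated_property_fields() == 2, unused_property_fields() == 2 and
// inobject_properties() == 4, prop_size above is 2 + 2 - 4 == 0, i.e. all
// expected properties fit in-object and the out-of-object properties array
// starts empty.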
2839
2840
2841Object* Heap::AllocateJSObject(JSFunction* constructor,
2842 PretenureFlag pretenure) {
2843 // Allocate the initial map if absent.
2844 if (!constructor->has_initial_map()) {
2845 Object* initial_map = AllocateInitialMap(constructor);
2846 if (initial_map->IsFailure()) return initial_map;
2847 constructor->set_initial_map(Map::cast(initial_map));
2848 Map::cast(initial_map)->set_constructor(constructor);
2849 }
2850 // Allocate the object based on the constructor's initial map.
2851 Object* result =
2852 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
2853 // Make sure result is NOT a global object if valid.
2854 ASSERT(result->IsFailure() || !result->IsGlobalObject());
2855 return result;
2856}
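// Allocation results are tagged: callers must check for a Failure before
// casting, following the idiom used throughout this file (sketch):
//   Object* obj = Heap::AllocateJSObject(constructor, pretenure);
//   if (obj->IsFailure()) return obj;  // propagate retry/OOM to the caller
//   JSObject* js_obj = JSObject::cast(obj);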
2857
2858
2859Object* Heap::AllocateGlobalObject(JSFunction* constructor) {
2860 ASSERT(constructor->has_initial_map());
2861 Map* map = constructor->initial_map();
2862
2863 // Make sure no field properties are described in the initial map.
2864 // This guarantees us that normalizing the properties does not
2865 // require us to change property values to JSGlobalPropertyCells.
2866 ASSERT(map->NextFreePropertyIndex() == 0);
2867
2868 // Make sure we don't have a ton of pre-allocated slots in the
2869 // global objects. They will be unused once we normalize the object.
2870 ASSERT(map->unused_property_fields() == 0);
2871 ASSERT(map->inobject_properties() == 0);
2872
2873 // Initial size of the backing store to avoid resizing the storage during
2874 // bootstrapping. The size differs between the JS global object and the
2875 // builtins object.
2876 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
2877
2878 // Allocate a dictionary object for backing storage.
2879 Object* obj =
2880 StringDictionary::Allocate(
2881 map->NumberOfDescribedProperties() * 2 + initial_size);
2882 if (obj->IsFailure()) return obj;
2883 StringDictionary* dictionary = StringDictionary::cast(obj);
2884
2885 // The global object might be created from an object template with accessors.
2886 // Fill these accessors into the dictionary.
2887 DescriptorArray* descs = map->instance_descriptors();
2888 for (int i = 0; i < descs->number_of_descriptors(); i++) {
2889 PropertyDetails details = descs->GetDetails(i);
2890 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
2891 PropertyDetails d =
2892 PropertyDetails(details.attributes(), CALLBACKS, details.index());
2893 Object* value = descs->GetCallbacksObject(i);
2894 value = Heap::AllocateJSGlobalPropertyCell(value);
2895 if (value->IsFailure()) return value;
2896
2897 Object* result = dictionary->Add(descs->GetKey(i), value, d);
2898 if (result->IsFailure()) return result;
2899 dictionary = StringDictionary::cast(result);
2900 }
2901
2902 // Allocate the global object and initialize it with the backing store.
2903 obj = Allocate(map, OLD_POINTER_SPACE);
2904 if (obj->IsFailure()) return obj;
2905 JSObject* global = JSObject::cast(obj);
2906 InitializeJSObjectFromMap(global, dictionary, map);
2907
2908 // Create a new map for the global object.
2909 obj = map->CopyDropDescriptors();
2910 if (obj->IsFailure()) return obj;
2911 Map* new_map = Map::cast(obj);
2912
2913 // Setup the global object as a normalized object.
2914 global->set_map(new_map);
2915 global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
2916 global->set_properties(dictionary);
2917
2918 // Make sure result is a global object with properties in dictionary.
2919 ASSERT(global->IsGlobalObject());
2920 ASSERT(!global->HasFastProperties());
2921 return global;
2922}
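// Sizing note (counts are illustrative): a JS global object whose template
// describes 10 accessors gets a StringDictionary allocated for
// 10 * 2 + 64 = 84 entries, while the builtins object uses the larger
// initial_size of 512. Dropping the descriptors and installing the
// dictionary is what leaves the result in normalized (slow) mode, as the
// final asserts verify.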
2923
2924
2925Object* Heap::CopyJSObject(JSObject* source) {
2926 // Never used to copy functions. If functions need to be copied we
2927 // have to be careful to clear the literals array.
2928 ASSERT(!source->IsJSFunction());
2929
2930 // Make the clone.
2931 Map* map = source->map();
2932 int object_size = map->instance_size();
2933 Object* clone;
2934
2935 // If we're forced to always allocate, we use the general allocation
2936 // functions which may leave us with an object in old space.
2937 if (always_allocate()) {
2938 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
2939 if (clone->IsFailure()) return clone;
2940 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002941 CopyBlock(clone_address,
2942 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00002943 object_size);
2944 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01002945 RecordWrites(clone_address,
2946 JSObject::kHeaderSize,
2947 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002948 } else {
2949 clone = new_space_.AllocateRaw(object_size);
2950 if (clone->IsFailure()) return clone;
2951 ASSERT(Heap::InNewSpace(clone));
2952 // Since we know the clone is allocated in new space, we can copy
2953 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002954 CopyBlock(HeapObject::cast(clone)->address(),
2955 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00002956 object_size);
2957 }
2958
2959 FixedArray* elements = FixedArray::cast(source->elements());
2960 FixedArray* properties = FixedArray::cast(source->properties());
2961 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01002962 if (elements->length() > 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002963 Object* elem = CopyFixedArray(elements);
2964 if (elem->IsFailure()) return elem;
2965 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
2966 }
2967 // Update properties if necessary.
2968 if (properties->length() > 0) {
2969 Object* prop = CopyFixedArray(properties);
2970 if (prop->IsFailure()) return prop;
2971 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
2972 }
2973 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00002974#ifdef ENABLE_LOGGING_AND_PROFILING
2975 ProducerHeapProfile::RecordJSObjectAllocation(clone);
2976#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002977 return clone;
2978}
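// Barrier note: the always_allocate() path may place the clone in old
// pointer space, so RecordWrites covers every field after the header,
// (object_size - JSObject::kHeaderSize) / kPointerSize slots in total; the
// new-space path skips this because, as noted above, stores into a
// new-space object never need to be remembered.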
2979
2980
2981Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
2982 JSGlobalProxy* object) {
2983 // Allocate initial map if absent.
2984 if (!constructor->has_initial_map()) {
2985 Object* initial_map = AllocateInitialMap(constructor);
2986 if (initial_map->IsFailure()) return initial_map;
2987 constructor->set_initial_map(Map::cast(initial_map));
2988 Map::cast(initial_map)->set_constructor(constructor);
2989 }
2990
2991 Map* map = constructor->initial_map();
2992
2993 // Check that the already allocated object has the same size as
2994 // objects allocated using the constructor.
2995 ASSERT(map->instance_size() == object->map()->instance_size());
2996
2997 // Allocate the backing storage for the properties.
2998 int prop_size = map->unused_property_fields() - map->inobject_properties();
2999 Object* properties = AllocateFixedArray(prop_size, TENURED);
3000 if (properties->IsFailure()) return properties;
3001
3002 // Reset the map for the object.
3003 object->set_map(constructor->initial_map());
3004
3005 // Reinitialize the object from the constructor map.
3006 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3007 return object;
3008}
3009
3010
3011Object* Heap::AllocateStringFromAscii(Vector<const char> string,
3012 PretenureFlag pretenure) {
3013 Object* result = AllocateRawAsciiString(string.length(), pretenure);
3014 if (result->IsFailure()) return result;
3015
3016 // Copy the characters into the new object.
3017 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3018 for (int i = 0; i < string.length(); i++) {
3019 string_result->SeqAsciiStringSet(i, string[i]);
3020 }
3021 return result;
3022}
3023
3024
3025Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
3026 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003027 // V8 only supports characters in the Basic Multilingual Plane.
3028 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003029 // Count the number of characters in the UTF-8 string and check if
3030 // it is an ASCII string.
3031 Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
3032 decoder->Reset(string.start(), string.length());
3033 int chars = 0;
3034 bool is_ascii = true;
3035 while (decoder->has_more()) {
3036 uc32 r = decoder->GetNext();
3037 if (r > String::kMaxAsciiCharCode) is_ascii = false;
3038 chars++;
3039 }
3040
3041 // If the string is ascii, we do not need to convert the characters
3042 // since UTF8 is backwards compatible with ascii.
3043 if (is_ascii) return AllocateStringFromAscii(string, pretenure);
3044
3045 Object* result = AllocateRawTwoByteString(chars, pretenure);
3046 if (result->IsFailure()) return result;
3047
3048 // Convert and copy the characters into the new object.
3049 String* string_result = String::cast(result);
3050 decoder->Reset(string.start(), string.length());
3051 for (int i = 0; i < chars; i++) {
3052 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003053 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003054 string_result->Set(i, r);
3055 }
3056 return result;
3057}
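// Example (illustrative): the three-byte UTF-8 sequence 0xE2 0x82 0xAC
// decodes to U+20AC, which is above String::kMaxAsciiCharCode, so the
// result becomes a two-byte string; a character outside the Basic
// Multilingual Plane (> kMaxSupportedChar) would be stored as
// unibrow::Utf8::kBadChar instead.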
3058
3059
3060Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3061 PretenureFlag pretenure) {
3062 // Check if the string is an ASCII string.
3063 int i = 0;
3064 while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
3065
3066 Object* result;
3067 if (i == string.length()) { // It's an ASCII string.
3068 result = AllocateRawAsciiString(string.length(), pretenure);
3069 } else { // It's not an ASCII string.
3070 result = AllocateRawTwoByteString(string.length(), pretenure);
3071 }
3072 if (result->IsFailure()) return result;
3073
3074 // Copy the characters into the new object, which may be either ASCII or
3075 // UTF-16.
3076 String* string_result = String::cast(result);
3077 for (int i = 0; i < string.length(); i++) {
3078 string_result->Set(i, string[i]);
3079 }
3080 return result;
3081}
3082
3083
3084Map* Heap::SymbolMapForString(String* string) {
3085 // If the string is in new space it cannot be used as a symbol.
3086 if (InNewSpace(string)) return NULL;
3087
3088 // Find the corresponding symbol map for strings.
3089 Map* map = string->map();
Steve Blockd0582a62009-12-15 09:54:21 +00003090 if (map == ascii_string_map()) return ascii_symbol_map();
3091 if (map == string_map()) return symbol_map();
3092 if (map == cons_string_map()) return cons_symbol_map();
3093 if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
3094 if (map == external_string_map()) return external_symbol_map();
3095 if (map == external_ascii_string_map()) return external_ascii_symbol_map();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003096 if (map == external_string_with_ascii_data_map()) {
3097 return external_symbol_with_ascii_data_map();
3098 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003099
3100 // No match found.
3101 return NULL;
3102}
3103
3104
3105Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3106 int chars,
Steve Blockd0582a62009-12-15 09:54:21 +00003107 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003108 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003109 // Ensure that chars matches the number of characters in the buffer.
3110 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3111 // Determine whether the string is ascii.
3112 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003113 while (buffer->has_more()) {
3114 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3115 is_ascii = false;
3116 break;
3117 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003118 }
3119 buffer->Rewind();
3120
3121 // Compute map and object size.
3122 int size;
3123 Map* map;
3124
3125 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003126 if (chars > SeqAsciiString::kMaxLength) {
3127 return Failure::OutOfMemoryException();
3128 }
Steve Blockd0582a62009-12-15 09:54:21 +00003129 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003130 size = SeqAsciiString::SizeFor(chars);
3131 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003132 if (chars > SeqTwoByteString::kMaxLength) {
3133 return Failure::OutOfMemoryException();
3134 }
Steve Blockd0582a62009-12-15 09:54:21 +00003135 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003136 size = SeqTwoByteString::SizeFor(chars);
3137 }
3138
3139 // Allocate string.
Leon Clarkee46be812010-01-19 14:06:41 +00003140 Object* result = (size > MaxObjectSizeInPagedSpace())
3141 ? lo_space_->AllocateRaw(size)
3142 : old_data_space_->AllocateRaw(size);
Steve Blocka7e24c12009-10-30 11:49:00 +00003143 if (result->IsFailure()) return result;
3144
3145 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003146 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003147 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003148 answer->set_length(chars);
3149 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003150
3151 ASSERT_EQ(size, answer->Size());
3152
3153 // Fill in the characters.
3154 for (int i = 0; i < chars; i++) {
3155 answer->Set(i, buffer->GetNext());
3156 }
3157 return answer;
3158}
3159
3160
3161Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003162 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3163 return Failure::OutOfMemoryException();
3164 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003165
3166 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003167 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003168
Leon Clarkee46be812010-01-19 14:06:41 +00003169 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3170 AllocationSpace retry_space = OLD_DATA_SPACE;
3171
Steve Blocka7e24c12009-10-30 11:49:00 +00003172 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003173 if (size > kMaxObjectSizeInNewSpace) {
3174 // Allocate in large object space, retry space will be ignored.
3175 space = LO_SPACE;
3176 } else if (size > MaxObjectSizeInPagedSpace()) {
3177 // Allocate in new space, retry in large object space.
3178 retry_space = LO_SPACE;
3179 }
3180 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3181 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003182 }
Leon Clarkee46be812010-01-19 14:06:41 +00003183 Object* result = AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003184 if (result->IsFailure()) return result;
3185
Steve Blocka7e24c12009-10-30 11:49:00 +00003186 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003187 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003188 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003189 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003190 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3191 return result;
3192}
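// Space selection, summarized: new-space requests larger than
// kMaxObjectSizeInNewSpace go straight to LO_SPACE; requests that fit in
// new space but exceed MaxObjectSizeInPagedSpace() stay in NEW_SPACE with
// LO_SPACE as the retry space; tenured requests over the paged-space limit
// also go to LO_SPACE. AllocateRawTwoByteString below applies the same
// policy.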
3193
3194
3195Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003196 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3197 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003198 }
Leon Clarkee46be812010-01-19 14:06:41 +00003199 int size = SeqTwoByteString::SizeFor(length);
3200 ASSERT(size <= SeqTwoByteString::kMaxSize);
3201 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3202 AllocationSpace retry_space = OLD_DATA_SPACE;
3203
3204 if (space == NEW_SPACE) {
3205 if (size > kMaxObjectSizeInNewSpace) {
3206 // Allocate in large object space, retry space will be ignored.
3207 space = LO_SPACE;
3208 } else if (size > MaxObjectSizeInPagedSpace()) {
3209 // Allocate in new space, retry in large object space.
3210 retry_space = LO_SPACE;
3211 }
3212 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3213 space = LO_SPACE;
3214 }
3215 Object* result = AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003216 if (result->IsFailure()) return result;
3217
Steve Blocka7e24c12009-10-30 11:49:00 +00003218 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003219 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003220 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003221 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003222 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3223 return result;
3224}
3225
3226
3227Object* Heap::AllocateEmptyFixedArray() {
3228 int size = FixedArray::SizeFor(0);
3229 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3230 if (result->IsFailure()) return result;
3231 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003232 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3233 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003234 return result;
3235}
3236
3237
3238Object* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003239 if (length < 0 || length > FixedArray::kMaxLength) {
3240 return Failure::OutOfMemoryException();
3241 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003242 // Use the general function if we're forced to always allocate.
3243 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3244 // Allocate the raw data for a fixed array.
3245 int size = FixedArray::SizeFor(length);
3246 return size <= kMaxObjectSizeInNewSpace
3247 ? new_space_.AllocateRaw(size)
3248 : lo_space_->AllocateRawFixedArray(size);
3249}
3250
3251
3252Object* Heap::CopyFixedArray(FixedArray* src) {
3253 int len = src->length();
3254 Object* obj = AllocateRawFixedArray(len);
3255 if (obj->IsFailure()) return obj;
3256 if (Heap::InNewSpace(obj)) {
3257 HeapObject* dst = HeapObject::cast(obj);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003258 CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
Steve Blocka7e24c12009-10-30 11:49:00 +00003259 return obj;
3260 }
3261 HeapObject::cast(obj)->set_map(src->map());
3262 FixedArray* result = FixedArray::cast(obj);
3263 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003264
Steve Blocka7e24c12009-10-30 11:49:00 +00003265 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003266 AssertNoAllocation no_gc;
3267 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003268 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3269 return result;
3270}
3271
3272
3273Object* Heap::AllocateFixedArray(int length) {
3274 ASSERT(length >= 0);
3275 if (length == 0) return empty_fixed_array();
3276 Object* result = AllocateRawFixedArray(length);
3277 if (!result->IsFailure()) {
3278 // Initialize header.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003279 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3280 array->set_map(fixed_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003281 array->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003282 // Initialize body.
Steve Block6ded16b2010-05-10 14:33:55 +01003283 ASSERT(!Heap::InNewSpace(undefined_value()));
3284 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003285 }
3286 return result;
3287}
3288
3289
Steve Block6ded16b2010-05-10 14:33:55 +01003290Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003291 if (length < 0 || length > FixedArray::kMaxLength) {
3292 return Failure::OutOfMemoryException();
3293 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003294
Leon Clarkee46be812010-01-19 14:06:41 +00003295 AllocationSpace space =
3296 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003297 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003298 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3299 // Too big for new space.
3300 space = LO_SPACE;
3301 } else if (space == OLD_POINTER_SPACE &&
3302 size > MaxObjectSizeInPagedSpace()) {
3303 // Too big for old pointer space.
3304 space = LO_SPACE;
3305 }
3306
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003307 AllocationSpace retry_space =
3308 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3309
3310 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003311}
3312
3313
Steve Block6ded16b2010-05-10 14:33:55 +01003314static Object* AllocateFixedArrayWithFiller(int length,
3315 PretenureFlag pretenure,
3316 Object* filler) {
3317 ASSERT(length >= 0);
3318 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
3319 if (length == 0) return Heap::empty_fixed_array();
3320
3321 ASSERT(!Heap::InNewSpace(filler));
3322 Object* result = Heap::AllocateRawFixedArray(length, pretenure);
3323 if (result->IsFailure()) return result;
3324
3325 HeapObject::cast(result)->set_map(Heap::fixed_array_map());
3326 FixedArray* array = FixedArray::cast(result);
3327 array->set_length(length);
3328 MemsetPointer(array->data_start(), filler, length);
3329 return array;
3330}
3331
3332
3333Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
3334 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3335}
3336
3337
3338Object* Heap::AllocateFixedArrayWithHoles(int length, PretenureFlag pretenure) {
3339 return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
3340}
3341
3342
3343Object* Heap::AllocateUninitializedFixedArray(int length) {
3344 if (length == 0) return empty_fixed_array();
3345
3346 Object* obj = AllocateRawFixedArray(length);
3347 if (obj->IsFailure()) return obj;
3348
3349 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3350 FixedArray::cast(obj)->set_length(length);
3351 return obj;
3352}
3353
3354
3355Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3356 Object* result = Heap::AllocateFixedArray(length, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003357 if (result->IsFailure()) return result;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003358 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003359 ASSERT(result->IsHashTable());
3360 return result;
3361}
3362
3363
3364Object* Heap::AllocateGlobalContext() {
3365 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3366 if (result->IsFailure()) return result;
3367 Context* context = reinterpret_cast<Context*>(result);
3368 context->set_map(global_context_map());
3369 ASSERT(context->IsGlobalContext());
3370 ASSERT(result->IsContext());
3371 return result;
3372}
3373
3374
3375Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
3376 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
3377 Object* result = Heap::AllocateFixedArray(length);
3378 if (result->IsFailure()) return result;
3379 Context* context = reinterpret_cast<Context*>(result);
3380 context->set_map(context_map());
3381 context->set_closure(function);
3382 context->set_fcontext(context);
3383 context->set_previous(NULL);
3384 context->set_extension(NULL);
3385 context->set_global(function->context()->global());
3386 ASSERT(!context->IsGlobalContext());
3387 ASSERT(context->is_function_context());
3388 ASSERT(result->IsContext());
3389 return result;
3390}
3391
3392
3393Object* Heap::AllocateWithContext(Context* previous,
3394 JSObject* extension,
3395 bool is_catch_context) {
3396 Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3397 if (result->IsFailure()) return result;
3398 Context* context = reinterpret_cast<Context*>(result);
3399 context->set_map(is_catch_context ? catch_context_map() : context_map());
3400 context->set_closure(previous->closure());
3401 context->set_fcontext(previous->fcontext());
3402 context->set_previous(previous);
3403 context->set_extension(extension);
3404 context->set_global(previous->global());
3405 ASSERT(!context->IsGlobalContext());
3406 ASSERT(!context->is_function_context());
3407 ASSERT(result->IsContext());
3408 return result;
3409}
3410
3411
3412Object* Heap::AllocateStruct(InstanceType type) {
3413 Map* map;
3414 switch (type) {
3415#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3416STRUCT_LIST(MAKE_CASE)
3417#undef MAKE_CASE
3418 default:
3419 UNREACHABLE();
3420 return Failure::InternalError();
3421 }
3422 int size = map->instance_size();
3423 AllocationSpace space =
3424 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
3425 Object* result = Heap::Allocate(map, space);
3426 if (result->IsFailure()) return result;
3427 Struct::cast(result)->InitializeBody(size);
3428 return result;
3429}
3430
3431
3432bool Heap::IdleNotification() {
3433 static const int kIdlesBeforeScavenge = 4;
3434 static const int kIdlesBeforeMarkSweep = 7;
3435 static const int kIdlesBeforeMarkCompact = 8;
3436 static int number_idle_notifications = 0;
3437 static int last_gc_count = gc_count_;
3438
Steve Block6ded16b2010-05-10 14:33:55 +01003439 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003440 bool finished = false;
3441
3442 if (last_gc_count == gc_count_) {
3443 number_idle_notifications++;
3444 } else {
3445 number_idle_notifications = 0;
3446 last_gc_count = gc_count_;
3447 }
3448
3449 if (number_idle_notifications == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003450 if (contexts_disposed_ > 0) {
3451 HistogramTimerScope scope(&Counters::gc_context);
3452 CollectAllGarbage(false);
3453 } else {
3454 CollectGarbage(0, NEW_SPACE);
3455 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003456 new_space_.Shrink();
3457 last_gc_count = gc_count_;
3458
3459 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003460 // Before doing the mark-sweep collections we clear the
3461 // compilation cache to avoid hanging on to source code and
3462 // generated code for cached functions.
3463 CompilationCache::Clear();
3464
Steve Blocka7e24c12009-10-30 11:49:00 +00003465 CollectAllGarbage(false);
3466 new_space_.Shrink();
3467 last_gc_count = gc_count_;
3468
3469 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3470 CollectAllGarbage(true);
3471 new_space_.Shrink();
3472 last_gc_count = gc_count_;
3473 number_idle_notifications = 0;
3474 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003475
3476 } else if (contexts_disposed_ > 0) {
3477 if (FLAG_expose_gc) {
3478 contexts_disposed_ = 0;
3479 } else {
3480 HistogramTimerScope scope(&Counters::gc_context);
3481 CollectAllGarbage(false);
3482 last_gc_count = gc_count_;
3483 }
3484 // If this is the first idle notification, we reset the
3485 // notification count to avoid letting idle notifications for
3486 // context disposal garbage collections start a potentially too
3487 // aggressive idle GC cycle.
3488 if (number_idle_notifications <= 1) {
3489 number_idle_notifications = 0;
3490 uncommit = false;
3491 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003492 }
3493
Steve Block6ded16b2010-05-10 14:33:55 +01003494 // Make sure that we have no pending context disposals and
3495 // conditionally uncommit from space.
3496 ASSERT(contexts_disposed_ == 0);
3497 if (uncommit) Heap::UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003498 return finished;
3499}
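// Heuristic summary: with no intervening GC, the 4th idle notification
// triggers a scavenge (or a full collection if contexts were disposed),
// the 7th clears the compilation cache and runs a full collection, and the
// 8th runs a compacting collection, resets the counter and makes
// IdleNotification() return true.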
3500
3501
3502#ifdef DEBUG
3503
3504void Heap::Print() {
3505 if (!HasBeenSetup()) return;
3506 Top::PrintStack();
3507 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003508 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3509 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003510}
3511
3512
3513void Heap::ReportCodeStatistics(const char* title) {
3514 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3515 PagedSpace::ResetCodeStatistics();
3516 // We do not look for code in new space, map space, or old space. If code
3517 // somehow ends up in those spaces, we would miss it here.
3518 code_space_->CollectCodeStatistics();
3519 lo_space_->CollectCodeStatistics();
3520 PagedSpace::ReportCodeStatistics();
3521}
3522
3523
3524// This function expects that NewSpace's allocated objects histogram is
3525// populated (via a call to CollectStatistics or else as a side effect of a
3526// just-completed scavenge collection).
3527void Heap::ReportHeapStatistics(const char* title) {
3528 USE(title);
3529 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3530 title, gc_count_);
3531 PrintF("mark-compact GC : %d\n", mc_count_);
3532 PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
3533 PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
3534
3535 PrintF("\n");
3536 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3537 GlobalHandles::PrintStats();
3538 PrintF("\n");
3539
3540 PrintF("Heap statistics : ");
3541 MemoryAllocator::ReportStatistics();
3542 PrintF("To space : ");
3543 new_space_.ReportStatistics();
3544 PrintF("Old pointer space : ");
3545 old_pointer_space_->ReportStatistics();
3546 PrintF("Old data space : ");
3547 old_data_space_->ReportStatistics();
3548 PrintF("Code space : ");
3549 code_space_->ReportStatistics();
3550 PrintF("Map space : ");
3551 map_space_->ReportStatistics();
3552 PrintF("Cell space : ");
3553 cell_space_->ReportStatistics();
3554 PrintF("Large object space : ");
3555 lo_space_->ReportStatistics();
3556 PrintF(">>>>>> ========================================= >>>>>>\n");
3557}
3558
3559#endif // DEBUG
3560
3561bool Heap::Contains(HeapObject* value) {
3562 return Contains(value->address());
3563}
3564
3565
3566bool Heap::Contains(Address addr) {
3567 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3568 return HasBeenSetup() &&
3569 (new_space_.ToSpaceContains(addr) ||
3570 old_pointer_space_->Contains(addr) ||
3571 old_data_space_->Contains(addr) ||
3572 code_space_->Contains(addr) ||
3573 map_space_->Contains(addr) ||
3574 cell_space_->Contains(addr) ||
3575 lo_space_->SlowContains(addr));
3576}
3577
3578
3579bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3580 return InSpace(value->address(), space);
3581}
3582
3583
3584bool Heap::InSpace(Address addr, AllocationSpace space) {
3585 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3586 if (!HasBeenSetup()) return false;
3587
3588 switch (space) {
3589 case NEW_SPACE:
3590 return new_space_.ToSpaceContains(addr);
3591 case OLD_POINTER_SPACE:
3592 return old_pointer_space_->Contains(addr);
3593 case OLD_DATA_SPACE:
3594 return old_data_space_->Contains(addr);
3595 case CODE_SPACE:
3596 return code_space_->Contains(addr);
3597 case MAP_SPACE:
3598 return map_space_->Contains(addr);
3599 case CELL_SPACE:
3600 return cell_space_->Contains(addr);
3601 case LO_SPACE:
3602 return lo_space_->SlowContains(addr);
3603 }
3604
3605 return false;
3606}
3607
3608
3609#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003610static void DummyScavengePointer(HeapObject** p) {
3611}
3612
3613
3614static void VerifyPointersUnderWatermark(
3615 PagedSpace* space,
3616 DirtyRegionCallback visit_dirty_region) {
3617 PageIterator it(space, PageIterator::PAGES_IN_USE);
3618
3619 while (it.has_next()) {
3620 Page* page = it.next();
3621 Address start = page->ObjectAreaStart();
3622 Address end = page->AllocationWatermark();
3623
3624 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
3625 start,
3626 end,
3627 visit_dirty_region,
3628 &DummyScavengePointer);
3629 }
3630}
3631
3632
3633static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3634 LargeObjectIterator it(space);
3635 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3636 if (object->IsFixedArray()) {
3637 Address slot_address = object->address();
3638 Address end = object->address() + object->Size();
3639
3640 while (slot_address < end) {
3641 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
3642 // When we are not in GC the Heap::InNewSpace() predicate
3643 // checks that pointers which satisfy the predicate point into
3644 // the active semispace.
3645 Heap::InNewSpace(*slot);
3646 slot_address += kPointerSize;
3647 }
3648 }
3649 }
3650}
3651
3652
Steve Blocka7e24c12009-10-30 11:49:00 +00003653void Heap::Verify() {
3654 ASSERT(HasBeenSetup());
3655
3656 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00003657 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00003658
3659 new_space_.Verify();
3660
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003661 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
3662 old_pointer_space_->Verify(&dirty_regions_visitor);
3663 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003664
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003665 VerifyPointersUnderWatermark(old_pointer_space_,
3666 &IteratePointersInDirtyRegion);
3667 VerifyPointersUnderWatermark(map_space_,
3668 &IteratePointersInDirtyMapsRegion);
3669 VerifyPointersUnderWatermark(lo_space_);
3670
3671 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
3672 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
3673
3674 VerifyPointersVisitor no_dirty_regions_visitor;
3675 old_data_space_->Verify(&no_dirty_regions_visitor);
3676 code_space_->Verify(&no_dirty_regions_visitor);
3677 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003678
3679 lo_space_->Verify();
3680}
3681#endif // DEBUG
3682
3683
3684Object* Heap::LookupSymbol(Vector<const char> string) {
3685 Object* symbol = NULL;
3686 Object* new_table = symbol_table()->LookupSymbol(string, &symbol);
3687 if (new_table->IsFailure()) return new_table;
3688 // Can't use set_symbol_table because SymbolTable::cast knows that
3689 // SymbolTable is a singleton and checks for identity.
3690 roots_[kSymbolTableRootIndex] = new_table;
3691 ASSERT(symbol != NULL);
3692 return symbol;
3693}
3694
3695
3696Object* Heap::LookupSymbol(String* string) {
3697 if (string->IsSymbol()) return string;
3698 Object* symbol = NULL;
3699 Object* new_table = symbol_table()->LookupString(string, &symbol);
3700 if (new_table->IsFailure()) return new_table;
3701 // Can't use set_symbol_table because SymbolTable::cast knows that
3702 // SymbolTable is a singleton and checks for identity.
3703 roots_[kSymbolTableRootIndex] = new_table;
3704 ASSERT(symbol != NULL);
3705 return symbol;
3706}
3707
3708
3709bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
3710 if (string->IsSymbol()) {
3711 *symbol = string;
3712 return true;
3713 }
3714 return symbol_table()->LookupSymbolIfExists(string, symbol);
3715}
3716
3717
3718#ifdef DEBUG
3719void Heap::ZapFromSpace() {
3720 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
3721 for (Address a = new_space_.FromSpaceLow();
3722 a < new_space_.FromSpaceHigh();
3723 a += kPointerSize) {
3724 Memory::Address_at(a) = kFromSpaceZapValue;
3725 }
3726}
3727#endif // DEBUG
3728
3729
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003730bool Heap::IteratePointersInDirtyRegion(Address start,
3731 Address end,
3732 ObjectSlotCallback copy_object_func) {
3733 Address slot_address = start;
3734 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00003735
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003736 while (slot_address < end) {
3737 Object** slot = reinterpret_cast<Object**>(slot_address);
3738 if (Heap::InNewSpace(*slot)) {
3739 ASSERT((*slot)->IsHeapObject());
3740 copy_object_func(reinterpret_cast<HeapObject**>(slot));
3741 if (Heap::InNewSpace(*slot)) {
3742 ASSERT((*slot)->IsHeapObject());
3743 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003744 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003745 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003746 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00003747 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003748 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00003749}
3750
3751
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003752// Compute start address of the first map following given addr.
3753static inline Address MapStartAlign(Address addr) {
3754 Address page = Page::FromAddress(addr)->ObjectAreaStart();
3755 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
3756}
Steve Blocka7e24c12009-10-30 11:49:00 +00003757
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003758
3759// Compute end address of the first map preceding given addr.
3760static inline Address MapEndAlign(Address addr) {
3761 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
3762 return page + ((addr - page) / Map::kSize * Map::kSize);
3763}
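// Equivalently (with P = the page's ObjectAreaStart()): MapStartAlign(addr)
// rounds (addr - P) up to a multiple of Map::kSize and MapEndAlign(addr)
// rounds it down, so together they bracket the whole Map objects contained
// in an arbitrary [start, end) byte range.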
3764
3765
3766static bool IteratePointersInDirtyMaps(Address start,
3767 Address end,
3768 ObjectSlotCallback copy_object_func) {
3769 ASSERT(MapStartAlign(start) == start);
3770 ASSERT(MapEndAlign(end) == end);
3771
3772 Address map_address = start;
3773 bool pointers_to_new_space_found = false;
3774
3775 while (map_address < end) {
3776 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
3777 ASSERT(Memory::Object_at(map_address)->IsMap());
3778
3779 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
3780 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
3781
3782 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
3783 pointer_fields_end,
3784 copy_object_func)) {
3785 pointers_to_new_space_found = true;
3786 }
3787
3788 map_address += Map::kSize;
3789 }
3790
3791 return pointers_to_new_space_found;
3792}
3793
3794
3795bool Heap::IteratePointersInDirtyMapsRegion(
3796 Address start,
3797 Address end,
3798 ObjectSlotCallback copy_object_func) {
3799 Address map_aligned_start = MapStartAlign(start);
3800 Address map_aligned_end = MapEndAlign(end);
3801
3802 bool contains_pointers_to_new_space = false;
3803
3804 if (map_aligned_start != start) {
3805 Address prev_map = map_aligned_start - Map::kSize;
3806 ASSERT(Memory::Object_at(prev_map)->IsMap());
3807
3808 Address pointer_fields_start =
3809 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
3810
3811 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003812 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003813
3814 contains_pointers_to_new_space =
3815 IteratePointersInDirtyRegion(pointer_fields_start,
3816 pointer_fields_end,
3817 copy_object_func)
3818 || contains_pointers_to_new_space;
3819 }
3820
3821 contains_pointers_to_new_space =
3822 IteratePointersInDirtyMaps(map_aligned_start,
3823 map_aligned_end,
3824 copy_object_func)
3825 || contains_pointers_to_new_space;
3826
3827 if (map_aligned_end != end) {
3828 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
3829
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003830 Address pointer_fields_start =
3831 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003832
3833 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003834 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003835
3836 contains_pointers_to_new_space =
3837 IteratePointersInDirtyRegion(pointer_fields_start,
3838 pointer_fields_end,
3839 copy_object_func)
3840 || contains_pointers_to_new_space;
3841 }
3842
3843 return contains_pointers_to_new_space;
3844}
3845
3846
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003847void Heap::IterateAndMarkPointersToFromSpace(Address start,
3848 Address end,
3849 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003850 Address slot_address = start;
3851 Page* page = Page::FromAddress(start);
3852
3853 uint32_t marks = page->GetRegionMarks();
3854
3855 while (slot_address < end) {
3856 Object** slot = reinterpret_cast<Object**>(slot_address);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01003857 if (Heap::InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003858 ASSERT((*slot)->IsHeapObject());
3859 callback(reinterpret_cast<HeapObject**>(slot));
3860 if (Heap::InNewSpace(*slot)) {
3861 ASSERT((*slot)->IsHeapObject());
3862 marks |= page->GetRegionMaskForAddress(slot_address);
3863 }
3864 }
3865 slot_address += kPointerSize;
3866 }
3867
3868 page->SetRegionMarks(marks);
3869}
3870
3871
3872uint32_t Heap::IterateDirtyRegions(
3873 uint32_t marks,
3874 Address area_start,
3875 Address area_end,
3876 DirtyRegionCallback visit_dirty_region,
3877 ObjectSlotCallback copy_object_func) {
3878 uint32_t newmarks = 0;
3879 uint32_t mask = 1;
3880
3881 if (area_start >= area_end) {
3882 return newmarks;
3883 }
3884
3885 Address region_start = area_start;
3886
3887 // area_start does not necessarily coincide with start of the first region.
3888 // Thus to calculate the beginning of the next region we have to align
3889 // area_start by Page::kRegionSize.
3890 Address second_region =
3891 reinterpret_cast<Address>(
3892 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
3893 ~Page::kRegionAlignmentMask);
3894
3895 // Next region might be beyond area_end.
3896 Address region_end = Min(second_region, area_end);
3897
3898 if (marks & mask) {
3899 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3900 newmarks |= mask;
3901 }
3902 }
3903 mask <<= 1;
3904
3905 // Iterate subsequent regions which lie fully inside [area_start, area_end[.
3906 region_start = region_end;
3907 region_end = region_start + Page::kRegionSize;
3908
3909 while (region_end <= area_end) {
3910 if (marks & mask) {
3911 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3912 newmarks |= mask;
3913 }
3914 }
3915
3916 region_start = region_end;
3917 region_end = region_start + Page::kRegionSize;
3918
3919 mask <<= 1;
3920 }
3921
3922 if (region_start != area_end) {
3923 // A small piece of the area was left unvisited because area_end does not coincide
3924 // with region end. Check whether region covering last part of area is
3925 // dirty.
3926 if (marks & mask) {
3927 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
3928 newmarks |= mask;
3929 }
3930 }
3931 }
3932
3933 return newmarks;
3934}
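// Worked example (region count is illustrative): if [area_start, area_end)
// spans three regions and only the second mark bit is set, only that
// middle region is passed to visit_dirty_region; its bit survives in the
// returned mask only if a pointer into new space is still found there, so
// regions that have become clean are not rescanned by later collections.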
3935
3936
3937
3938void Heap::IterateDirtyRegions(
3939 PagedSpace* space,
3940 DirtyRegionCallback visit_dirty_region,
3941 ObjectSlotCallback copy_object_func,
3942 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003943
3944 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003945
Steve Blocka7e24c12009-10-30 11:49:00 +00003946 while (it.has_next()) {
3947 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003948 uint32_t marks = page->GetRegionMarks();
3949
3950 if (marks != Page::kAllRegionsCleanMarks) {
3951 Address start = page->ObjectAreaStart();
3952
3953 // Do not try to visit pointers beyond page allocation watermark.
3954 // Page can contain garbage pointers there.
3955 Address end;
3956
3957 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
3958 page->IsWatermarkValid()) {
3959 end = page->AllocationWatermark();
3960 } else {
3961 end = page->CachedAllocationWatermark();
3962 }
3963
3964 ASSERT(space == old_pointer_space_ ||
3965 (space == map_space_ &&
3966 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
3967
3968 page->SetRegionMarks(IterateDirtyRegions(marks,
3969 start,
3970 end,
3971 visit_dirty_region,
3972 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00003973 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003974
3975 // Mark page watermark as invalid to maintain watermark validity invariant.
3976 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
3977 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00003978 }
3979}
3980
3981
Steve Blockd0582a62009-12-15 09:54:21 +00003982void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
3983 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00003984 IterateWeakRoots(v, mode);
3985}
3986
3987
3988void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003989 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00003990 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00003991 if (mode != VISIT_ALL_IN_SCAVENGE) {
3992 // Scavenge collections have special processing for this.
3993 ExternalStringTable::Iterate(v);
3994 }
3995 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00003996}
3997
3998
Steve Blockd0582a62009-12-15 09:54:21 +00003999void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004000 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004001 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004002
Steve Block6ded16b2010-05-10 14:33:55 +01004003 v->VisitPointer(BitCast<Object**, String**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004004 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004005
4006 Bootstrapper::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004007 v->Synchronize("bootstrapper");
Steve Blocka7e24c12009-10-30 11:49:00 +00004008 Top::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004009 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004010 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004011 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004012
4013#ifdef ENABLE_DEBUGGER_SUPPORT
4014 Debug::Iterate(v);
4015#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004016 v->Synchronize("debug");
Steve Blocka7e24c12009-10-30 11:49:00 +00004017 CompilationCache::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004018 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004019
4020 // Iterate over local handles in handle scopes.
4021 HandleScopeImplementer::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004022 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004023
Leon Clarkee46be812010-01-19 14:06:41 +00004024 // Iterate over the builtin code objects and code stubs in the
4025 // heap. Note that it is not necessary to iterate over code objects
4026 // on scavenge collections.
4027 if (mode != VISIT_ALL_IN_SCAVENGE) {
4028 Builtins::IterateBuiltins(v);
4029 }
Steve Blockd0582a62009-12-15 09:54:21 +00004030 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004031
4032 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004033 if (mode == VISIT_ONLY_STRONG) {
4034 GlobalHandles::IterateStrongRoots(v);
4035 } else {
4036 GlobalHandles::IterateAllRoots(v);
4037 }
4038 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004039
4040 // Iterate over pointers being held by inactive threads.
4041 ThreadManager::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004042 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004043
4044 // Iterate over the pointers the Serialization/Deserialization code is
4045 // holding.
4046 // During garbage collection this keeps the partial snapshot cache alive.
4047 // During deserialization of the startup snapshot this creates the partial
4048 // snapshot cache and deserializes the objects it refers to. During
4049 // serialization this does nothing, since the partial snapshot cache is
4050 // empty. However the next thing we do is create the partial snapshot,
4051 // filling up the partial snapshot cache with objects it needs as we go.
4052 SerializerDeserializer::Iterate(v);
4053 // We don't do a v->Synchronize call here, because in debug mode that will
4054 // output a flag to the snapshot. However at this point the serializer and
4055 // deserializer are deliberately a little unsynchronized (see above) so the
4056 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004057}
Steve Blocka7e24c12009-10-30 11:49:00 +00004058
4059
4060// Flag is set when the heap has been configured. The heap can be repeatedly
4061// configured through the API until it is setup.
4062static bool heap_configured = false;
4063
4064// TODO(1236194): Since the heap size is configurable on the command line
4065// and through the API, we should gracefully handle the case that the heap
4066// size is not big enough to fit all the initial objects.
Steve Block3ce2e202009-11-05 08:53:23 +00004067bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004068 if (HasBeenSetup()) return false;
4069
Steve Block3ce2e202009-11-05 08:53:23 +00004070 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4071
4072 if (Snapshot::IsEnabled()) {
4073 // If we are using a snapshot we always reserve the default amount
4074 // of memory for each semispace because code in the snapshot has
4075 // write-barrier code that relies on the size and alignment of new
4076 // space. We therefore cannot use a larger max semispace size
4077 // than the default reserved semispace size.
4078 if (max_semispace_size_ > reserved_semispace_size_) {
4079 max_semispace_size_ = reserved_semispace_size_;
4080 }
4081 } else {
4082 // If we are not using snapshots we reserve space for the actual
4083 // max semispace size.
4084 reserved_semispace_size_ = max_semispace_size_;
4085 }
4086
4087 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00004088
4089 // The new space size must be a power of two to support single-bit testing
4090 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004091 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4092 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4093 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4094 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004095
4096 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004097 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004098
4099 heap_configured = true;
4100 return true;
4101}
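// Worked example (sizes are illustrative): requesting a 3 MB max semispace
// without a snapshot rounds max_semispace_size_ up to the next power of
// two, 4 MB, makes external_allocation_limit_ 10 * 4 MB = 40 MB, and
// rounds max_old_generation_size_ up to a multiple of Page::kPageSize;
// with a snapshot enabled the request would first be clamped to
// reserved_semispace_size_ if it exceeded it.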
4102
4103
4104bool Heap::ConfigureHeapDefault() {
Steve Block3ce2e202009-11-05 08:53:23 +00004105 return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00004106}
4107
4108
Ben Murdochbb769b22010-08-11 14:56:33 +01004109void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Steve Blockd0582a62009-12-15 09:54:21 +00004110 *stats->start_marker = 0xDECADE00;
4111 *stats->end_marker = 0xDECADE01;
4112 *stats->new_space_size = new_space_.Size();
4113 *stats->new_space_capacity = new_space_.Capacity();
4114 *stats->old_pointer_space_size = old_pointer_space_->Size();
4115 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4116 *stats->old_data_space_size = old_data_space_->Size();
4117 *stats->old_data_space_capacity = old_data_space_->Capacity();
4118 *stats->code_space_size = code_space_->Size();
4119 *stats->code_space_capacity = code_space_->Capacity();
4120 *stats->map_space_size = map_space_->Size();
4121 *stats->map_space_capacity = map_space_->Capacity();
4122 *stats->cell_space_size = cell_space_->Size();
4123 *stats->cell_space_capacity = cell_space_->Capacity();
4124 *stats->lo_space_size = lo_space_->Size();
4125 GlobalHandles::RecordStats(stats);
Ben Murdochbb769b22010-08-11 14:56:33 +01004126 *stats->memory_allocator_size = MemoryAllocator::Size();
4127 *stats->memory_allocator_capacity =
4128 MemoryAllocator::Size() + MemoryAllocator::Available();
4129 if (take_snapshot) {
4130 HeapIterator iterator;
4131 for (HeapObject* obj = iterator.next();
4132 obj != NULL;
4133 obj = iterator.next()) {
4134 // Note: snapshot won't be precise because IsFreeListNode returns true
4135 // for any bytearray.
4136 if (FreeListNode::IsFreeListNode(obj)) continue;
4137 InstanceType type = obj->map()->instance_type();
4138 ASSERT(0 <= type && type <= LAST_TYPE);
4139 stats->objects_per_type[type]++;
4140 stats->size_per_type[type] += obj->Size();
4141 }
4142 }
Steve Blockd0582a62009-12-15 09:54:21 +00004143}
4144
4145
Steve Blocka7e24c12009-10-30 11:49:00 +00004146int Heap::PromotedSpaceSize() {
4147 return old_pointer_space_->Size()
4148 + old_data_space_->Size()
4149 + code_space_->Size()
4150 + map_space_->Size()
4151 + cell_space_->Size()
4152 + lo_space_->Size();
4153}
4154
4155
4156int Heap::PromotedExternalMemorySize() {
4157 if (amount_of_external_allocated_memory_
4158 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4159 return amount_of_external_allocated_memory_
4160 - amount_of_external_allocated_memory_at_last_global_gc_;
4161}


bool Heap::Setup(bool create_heap_objects) {
  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (e.g., through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!heap_configured) {
    if (!ConfigureHeapDefault()) return false;
  }

  // Set up the memory allocator and reserve a chunk of memory for new
  // space. The chunk is double the size of the requested reserved
  // new space size to ensure that we can find a pair of semispaces that
  // are contiguous and aligned to their size.
  if (!MemoryAllocator::Setup(MaxReserved())) return false;
  void* chunk =
      MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
  if (chunk == NULL) return false;

  // Align the pair of semispaces to their size, which must be a power
  // of 2.
  Address new_space_start =
      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
    return false;
  }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->Setup(NULL, 0)) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->Setup(NULL, 0)) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!CodeRange::Setup(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->Setup(NULL, 0)) return false;

  // Initialize map space.
  map_space_ = new MapSpace(FLAG_use_big_map_space
      ? max_old_generation_size_
      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
      FLAG_max_map_space_pages,
      MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->Setup(NULL, 0)) return false;

  // Initialize global property cell space.
  cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->Setup(NULL, 0)) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->Setup()) return false;

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects.
    if (!CreateInitialObjects()) return false;
  }

  LOG(IntEvent("heap-capacity", Capacity()));
  LOG(IntEvent("heap-available", Available()));

#ifdef ENABLE_LOGGING_AND_PROFILING
  // This should be called only after initial objects have been created.
  ProducerHeapProfile::Setup();
#endif

  return true;
}


void Heap::SetStackLimits() {
  // On 64 bit machines, pointers are generally out of range of Smis.  We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
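// Worked example (illustrative, assuming a 32-bit build where kSmiTag == 0
// and kSmiTagMask == 1): a jslimit() address of 0xbfff1235 is stored as
// (0xbfff1235 & ~1) | 0 == 0xbfff1234. Clearing the low tag bit makes the
// stored word parse as a Smi, so the GC never treats these root entries as
// heap pointers.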


void Heap::TearDown() {
  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("mark_compact_count=%d ", mc_count_);
    PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
    PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
    PrintF("max_alive_after_gc=%d ", GCTracer::get_max_alive_after_gc());
    PrintF("\n\n");
  }

  GlobalHandles::TearDown();

  ExternalStringTable::TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  MemoryAllocator::TearDown();
}


void Heap::Shrink() {
  // Try to shrink all paged spaces.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->Shrink();
}


#ifdef ENABLE_HEAP_PROTECTION

void Heap::Protect() {
  if (HasBeenSetup()) {
    AllSpaces spaces;
    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
      space->Protect();
  }
}


void Heap::Unprotect() {
  if (HasBeenSetup()) {
    AllSpaces spaces;
    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
      space->Unprotect();
  }
}

#endif


void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}
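// Illustrative usage sketch (not part of the original source; the callback
// name is hypothetical). A prologue hook that only fires for mark-sweep /
// mark-compact collections could be registered as:
//
//   static void OnMarkSweepStart(GCType type, GCCallbackFlags flags) {
//     // e.g. flush embedder-side caches before a full collection
//   }
//   ...
//   Heap::AddGCPrologueCallback(OnMarkSweepStart, kGCTypeMarkSweepCompact);
//   ...
//   Heap::RemoveGCPrologueCallback(OnMarkSweepStart);  // must match a prior Add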


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n", p, *p);
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  HandleScopeImplementer::Iterate(&v);
}

#endif


Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return Heap::new_space();
    case OLD_POINTER_SPACE:
      return Heap::old_pointer_space();
    case OLD_DATA_SPACE:
      return Heap::old_data_space();
    case CODE_SPACE:
      return Heap::code_space();
    case MAP_SPACE:
      return Heap::map_space();
    case CELL_SPACE:
      return Heap::cell_space();
    case LO_SPACE:
      return Heap::lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return Heap::old_pointer_space();
    case OLD_DATA_SPACE:
      return Heap::old_data_space();
    case CODE_SPACE:
      return Heap::code_space();
    case MAP_SPACE:
      return Heap::map_space();
    case CELL_SPACE:
      return Heap::cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return Heap::old_pointer_space();
    case OLD_DATA_SPACE:
      return Heap::old_data_space();
    case CODE_SPACE:
      return Heap::code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return an iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(Heap::new_space());
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(Heap::old_data_space());
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(Heap::code_space());
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(Heap::map_space());
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(Heap::cell_space());
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(Heap::lo_space());
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


HeapIterator::HeapIterator() {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator();
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
}


HeapObject* HeapIterator::next() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}
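// Illustrative usage (not part of the original source): HeapIterator walks
// every live object in every space, which is the same pattern used by the
// take_snapshot branch of the stats-recording function near the top of this
// section:
//
//   HeapIterator iterator;
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // inspect obj->map()->instance_type(), obj->Size(), ...
//   }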


#ifdef DEBUG

static bool search_for_any_global;
static Object* search_target;
static bool found_target;
static List<Object*> object_stack(20);


// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;

static void MarkObjectRecursively(Object** p);
class MarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Mark all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkObjectRecursively(p);
    }
  }
};

static MarkObjectVisitor mark_visitor;

static void MarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target) return;  // stop if target found
  object_stack.Add(obj);
  if ((search_for_any_global && obj->IsJSGlobalObject()) ||
      (!search_for_any_global && (obj == search_target))) {
    found_target = true;
    return;
  }

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  MarkObjectRecursively(&map);

  obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                   &mark_visitor);

  if (!found_target)  // don't pop if found the target
    object_stack.RemoveLast();
}
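// Illustrative note (not part of the original source): the mark is encoded in
// the object's map word. A live object's map word holds a properly tagged
// HeapObject pointer; MarkObjectRecursively() overwrites it with the map's
// untagged address plus kMarkTag (2), which no longer looks like a
// HeapObject, so the "!map->IsHeapObject()" test above doubles as the
// visited check. UnmarkObjectRecursively() below reverses this by
// subtracting kMarkTag again.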


static void UnmarkObjectRecursively(Object** p);
class UnmarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Unmark all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        UnmarkObjectRecursively(p);
    }
  }
};

static UnmarkObjectVisitor unmark_visitor;

static void UnmarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   &unmark_visitor);
}


static void MarkRootObjectRecursively(Object** root) {
  if (search_for_any_global) {
    ASSERT(search_target == NULL);
  } else {
    ASSERT(search_target->IsHeapObject());
  }
  found_target = false;
  object_stack.Clear();

  MarkObjectRecursively(root);
  UnmarkObjectRecursively(root);

  if (found_target) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack.is_empty());
    for (int i = 0; i < object_stack.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Helper class for visiting HeapObjects recursively.
class MarkRootVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkRootObjectRecursively(p);
    }
  }
};


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  search_target = target;
  search_for_any_global = false;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  search_target = NULL;
  search_for_any_global = true;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
#endif


static int CountTotalHolesSize() {
  int holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}


GCTracer::GCTracer()
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = Heap::SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;

  if (last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
  }
}


GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (last_gc_end_timestamp_ == 0);

  alive_after_last_gc_ = Heap::SizeOfObjects();
  last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    max_gc_pause_ = Max(max_gc_pause_, time);
    max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
    if (!first_gc) {
      min_in_mutator_ = Min(min_in_mutator_,
                            static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
    PrintF("flushcode=%d ", static_cast<int>(scopes_[Scope::MC_FLUSH_CODE]));

    PrintF("total_size_before=%d ", start_size_);
    PrintF("total_size_after=%d ", Heap::SizeOfObjects());
    PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%d ", CountTotalHolesSize());

    PrintF("allocated=%d ", allocated_since_last_gc_);
    PrintF("promoted=%d ", promoted_objects_size_);

    PrintF("\n");
  }

#if defined(ENABLE_LOGGING_AND_PROFILING)
  Heap::PrintShortHeapStatistics();
#endif
}
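// Illustrative output (numbers invented, not from a real run): with
// --trace-gc the non-nvp branch above prints a single line per collection
// along the lines of
//
//   Scavenge 12.3 -> 8.1 MB, 4 ms.
//
// while --trace-gc-nvp emits the same information as name=value pairs
// (pause=, mutator=, gc=, external=, ..., allocated=, promoted=).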


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return MarkCompactCollector::HasCompacted() ? "Mark-compact"
                                                  : "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}
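// Illustrative walk-through (not part of the original source): the map
// pointer is truncated to 32 bits and shifted right by kMapHashShift to drop
// its always-identical low alignment bits, then XORed with the string's
// hash. Masking with kCapacityMask keeps the result inside the cache's index
// range (assuming, as is typical here, a power-of-two capacity so the mask
// is capacity - 1).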


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return -1;
}


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (Heap::LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];


int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


DescriptorLookupCache::Key
DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];

int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];


#ifdef DEBUG
bool Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (Bootstrapper::IsActive()) return true;
  if (disallow_allocation_failure()) return true;
  return CollectGarbage(0, NEW_SPACE);
}
#endif


TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
    : type_(t) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}
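// Illustrative note (not part of the original source): 0xffffffff in both
// input words is the bit pattern of a NaN which, per the comment above, the
// FPU never produces for a real argument, so a freshly constructed cache can
// never report a spurious hit before the first real entry is written.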


TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    if (Heap::InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}
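// Illustrative note (not part of the original source): CleanUp() compacts
// both lists after a collection. Entries overwritten with the null sentinel
// are dropped, external strings still in new space stay in
// new_space_strings_, and strings the GC promoted migrate to
// old_space_strings_ so later scavenges no longer need to visit them.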


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


List<Object*> ExternalStringTable::new_space_strings_;
List<Object*> ExternalStringTable::old_space_strings_;

} }  // namespace v8::internal