// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif


namespace v8 {
namespace internal {


String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];
Object* Heap::global_contexts_list_;


NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;

intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
static const int default_max_semispace_size_ = 2*MB;
intptr_t Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
intptr_t Heap::code_range_size_ = 0;
intptr_t Heap::max_executable_size_ = max_old_generation_size_;
#elif defined(V8_TARGET_ARCH_X64)
static const int default_max_semispace_size_ = 16*MB;
intptr_t Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
intptr_t Heap::code_range_size_ = 512*MB;
intptr_t Heap::max_executable_size_ = 256*MB;
#else
static const int default_max_semispace_size_ = 8*MB;
intptr_t Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
intptr_t Heap::code_range_size_ = 0;
intptr_t Heap::max_executable_size_ = 128*MB;
#endif
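// Reading the table above with illustrative arithmetic: the x64 build starts
// each semispace at 1 MB and allows it to grow to a 16 MB maximum, while the
// Android build is capped at a 2 MB semispace and a 192 MB old generation.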

// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
int Heap::max_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#else
int Heap::max_semispace_size_ = default_max_semispace_size_;
#endif

// The snapshot semispace size will be the default semispace size if
// snapshotting is used and will be the requested semispace size as
// set up by ConfigureHeap otherwise.
int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;

List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.

// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
int Heap::survived_since_last_expansion_ = 0;
intptr_t Heap::external_allocation_limit_ = 0;

Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;

GCTracer* Heap::tracer_ = NULL;

int Heap::unflattened_strings_length_ = 0;

int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;

int Heap::young_survivors_after_last_gc_ = 0;
int Heap::high_survival_rate_period_length_ = 0;
double Heap::survival_rate_ = 0;
Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG

intptr_t GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
intptr_t GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;

intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return MemoryAllocator::SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(MarkCompactCollector::are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(Heap::map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         MemoryAllocator::Size(),
         MemoryAllocator::Available());
  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // As in the before-GC case, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  TranscendentalCache::Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));

  Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
  Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  MarkCompactCollector::SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(true);

  // A major GC invokes weak handle callbacks on weakly reachable
  // handles, but does not collect the weakly reachable objects until the
  // next major GC. Therefore, if we collect aggressively and a weak handle
  // callback has been invoked, we rerun the major GC to release the objects
  // that have become garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  MarkCompactCollector::SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
  if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  SymbolTableVerifier() { }
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  Heap::symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation. This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }
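  // For example (numbers are illustrative only): if 512 KB of a 1 MB new
  // space survived the last scavenge, survival_rate is 50.  Rates above
  // kYoungSurvivalRateThreshold extend the high-survival-rate period, and
  // only a swing of more than kYoungSurvivalRateAllowedDeviation percentage
  // points against the previous rate flips the trend to DECREASING or
  // INCREASING above.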

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
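    // Illustrative numbers: with a 30 MB old generation after this
    // collection, the limits above come out at 30 + Max(2, 10) = 40 MB for
    // promotion and 30 + Max(8, 15) = 45 MB for allocation (both doubled
    // below if survival rates stay high).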

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  Counters::objs_since_last_young.Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        GlobalHandles::PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactCollector::Prepare(tracer);

  bool is_compacting = MarkCompactCollector::IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  MarkCompactCollector::CollectGarbage();

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  KeyedLookupCache::Clear();
  ContextSlotCache::Clear();
  DescriptorLookupCache::Clear();

  RuntimeProfiler::MarkCompactPrologue(is_compacting);

  CompilationCache::MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  void Initialize(Address start_address) {
    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
  }

  bool is_empty() { return front_ <= rear_; }

  void insert(HeapObject* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
    // Assert no overflow into live objects.
    ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
  }

  void remove(HeapObject** target, int* size) {
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    ASSERT(front_ >= rear_);
  }

 private:
  // The front of the queue is higher in memory than the rear.
  intptr_t* front_;
  intptr_t* rear_;
};
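// The queue shares the to space with newly copied objects: (target, size)
// entries grow downwards from the address passed to Initialize()
// (ToSpaceHigh() in Heap::Scavenge() below), while copied objects fill the
// semispace from below; the ASSERT in insert() checks that the two never
// meet.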


// Shared state read by the scavenge collector and set by ScavengeObject.
static PromotionQueue promotion_queue;


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(Heap::code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(Heap::old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  DescriptorLookupCache::Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue.Initialize(new_space_.ToSpaceHigh());

  ScavengeVisitor scavenge_visitor;
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation. By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge the object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  // Scavenge objects reachable from the runtime-profiler sampler
  // window directly.
  Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress();
  int sampler_window_size = RuntimeProfiler::SamplerWindowSize();
  scavenge_visitor.VisitPointers(
      sampler_window_address,
      sampler_window_address + sampler_window_size);

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  ExternalStringTable::Verify();

  if (ExternalStringTable::new_space_strings_.is_empty()) return;

  Object** start = &ExternalStringTable::new_space_strings_[0];
  Object** end = start + ExternalStringTable::new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(Heap::InFromSpace(*p));
    String* target = updater_func(p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (Heap::InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      ExternalStringTable::AddOldString(target);
    }
  }

  ASSERT(last <= end);
  ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
}


static Object* ProcessFunctionWeakReferences(Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = Heap::undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (!candidate->IsUndefined()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head->IsUndefined()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(Heap::undefined_value());
  }

  return head;
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (!candidate->IsUndefined()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head->IsUndefined()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  Heap::global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue.remove(&target, &size);

      // The promoted object might already have been partially visited during
      // dirty regions iteration. Thus we search specifically for pointers
      // into the from semispace instead of looking for pointers into new
      // space.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001246 ASSERT(!target->IsMap());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001247 IterateAndMarkPointersToFromSpace(target->address(),
1248 target->address() + size,
1249 &ScavengePointer);
Steve Blocka7e24c12009-10-30 11:49:00 +00001250 }
1251
1252 // Take another spin if there are now unswept objects in new space
1253 // (there are currently no more unswept promoted objects).
1254 } while (new_space_front < new_space_.top());
1255
Leon Clarkee46be812010-01-19 14:06:41 +00001256 return new_space_front;
Steve Blocka7e24c12009-10-30 11:49:00 +00001257}
1258
1259
Iain Merrick75681382010-08-19 15:07:18 +01001260class ScavengingVisitor : public StaticVisitorBase {
1261 public:
1262 static void Initialize() {
1263 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1264 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1265 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1266 table_.Register(kVisitByteArray, &EvacuateByteArray);
1267 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
Ben Murdochf87a2032010-10-22 12:50:53 +01001268 table_.Register(kVisitGlobalContext,
1269 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1270 VisitSpecialized<Context::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001271
1272 typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
1273
1274 table_.Register(kVisitConsString,
1275 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1276 VisitSpecialized<ConsString::kSize>);
1277
1278 table_.Register(kVisitSharedFunctionInfo,
1279 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1280 VisitSpecialized<SharedFunctionInfo::kSize>);
1281
1282 table_.Register(kVisitJSFunction,
1283 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1284 VisitSpecialized<JSFunction::kSize>);
1285
1286 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1287 kVisitDataObject,
1288 kVisitDataObjectGeneric>();
1289
1290 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1291 kVisitJSObject,
1292 kVisitJSObjectGeneric>();
1293
1294 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1295 kVisitStruct,
1296 kVisitStructGeneric>();
1297 }
1298
1299
1300 static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
1301 table_.GetVisitor(map)(map, slot, obj);
1302 }
1303
1304
1305 private:
1306 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1307 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1308
Steve Blocka7e24c12009-10-30 11:49:00 +00001309#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Iain Merrick75681382010-08-19 15:07:18 +01001310 static void RecordCopiedObject(HeapObject* obj) {
1311 bool should_record = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001312#ifdef DEBUG
Iain Merrick75681382010-08-19 15:07:18 +01001313 should_record = FLAG_heap_stats;
Steve Blocka7e24c12009-10-30 11:49:00 +00001314#endif
1315#ifdef ENABLE_LOGGING_AND_PROFILING
Iain Merrick75681382010-08-19 15:07:18 +01001316 should_record = should_record || FLAG_log_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00001317#endif
Iain Merrick75681382010-08-19 15:07:18 +01001318 if (should_record) {
1319 if (Heap::new_space()->Contains(obj)) {
1320 Heap::new_space()->RecordAllocation(obj);
1321 } else {
1322 Heap::new_space()->RecordPromotion(obj);
1323 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001324 }
1325 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001326#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1327
Iain Merrick75681382010-08-19 15:07:18 +01001328  // Helper function used by the Evacuate* routines to copy a source object
1329  // to an allocated target object and update the forwarding pointer in the
1330  // source object. Returns the target object.
1331 INLINE(static HeapObject* MigrateObject(HeapObject* source,
1332 HeapObject* target,
1333 int size)) {
1334 // Copy the content of source to target.
1335 Heap::CopyBlock(target->address(), source->address(), size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001336
Iain Merrick75681382010-08-19 15:07:18 +01001337 // Set the forwarding address.
1338 source->set_map_word(MapWord::FromForwardingAddress(target));
Steve Blocka7e24c12009-10-30 11:49:00 +00001339
1340#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Iain Merrick75681382010-08-19 15:07:18 +01001341 // Update NewSpace stats if necessary.
1342 RecordCopiedObject(target);
Steve Blocka7e24c12009-10-30 11:49:00 +00001343#endif
Iain Merrick75681382010-08-19 15:07:18 +01001344 HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001345#if defined(ENABLE_LOGGING_AND_PROFILING)
1346 if (Logger::is_logging() || CpuProfiler::is_profiling()) {
1347 if (target->IsJSFunction()) {
1348 PROFILE(FunctionMoveEvent(source->address(), target->address()));
Ben Murdochf87a2032010-10-22 12:50:53 +01001349 PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001350 }
1351 }
1352#endif
Iain Merrick75681382010-08-19 15:07:18 +01001353 return target;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001354 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001355
1356
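  // Note: EvacuateObject first tries to promote the object out of new
  // space: oversized objects go to the large object space, data-only
  // objects to old data space, and pointer-containing objects to old
  // pointer space (and onto the promotion queue so their slots are
  // rescanned). If promotion fails, the object is copied within new
  // space instead.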
Iain Merrick75681382010-08-19 15:07:18 +01001357 template<ObjectContents object_contents, SizeRestriction size_restriction>
1358 static inline void EvacuateObject(Map* map,
1359 HeapObject** slot,
1360 HeapObject* object,
1361 int object_size) {
1362 ASSERT((size_restriction != SMALL) ||
1363 (object_size <= Page::kMaxHeapObjectSize));
1364 ASSERT(object->Size() == object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001365
Iain Merrick75681382010-08-19 15:07:18 +01001366 if (Heap::ShouldBePromoted(object->address(), object_size)) {
John Reck59135872010-11-02 12:39:01 -07001367 MaybeObject* maybe_result;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001368
Iain Merrick75681382010-08-19 15:07:18 +01001369 if ((size_restriction != SMALL) &&
1370 (object_size > Page::kMaxHeapObjectSize)) {
John Reck59135872010-11-02 12:39:01 -07001371 maybe_result = Heap::lo_space()->AllocateRawFixedArray(object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001372 } else {
Iain Merrick75681382010-08-19 15:07:18 +01001373 if (object_contents == DATA_OBJECT) {
John Reck59135872010-11-02 12:39:01 -07001374 maybe_result = Heap::old_data_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001375 } else {
John Reck59135872010-11-02 12:39:01 -07001376 maybe_result = Heap::old_pointer_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001377 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001378 }
1379
John Reck59135872010-11-02 12:39:01 -07001380 Object* result = NULL; // Initialization to please compiler.
1381 if (maybe_result->ToObject(&result)) {
Iain Merrick75681382010-08-19 15:07:18 +01001382 HeapObject* target = HeapObject::cast(result);
1383 *slot = MigrateObject(object, target, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001384
Iain Merrick75681382010-08-19 15:07:18 +01001385 if (object_contents == POINTER_OBJECT) {
1386 promotion_queue.insert(target, object_size);
1387 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001388
Iain Merrick75681382010-08-19 15:07:18 +01001389 Heap::tracer()->increment_promoted_objects_size(object_size);
1390 return;
1391 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001392 }
John Reck59135872010-11-02 12:39:01 -07001393 Object* result =
1394 Heap::new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
Iain Merrick75681382010-08-19 15:07:18 +01001395 *slot = MigrateObject(object, HeapObject::cast(result), object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001396 return;
1397 }
1398
Iain Merrick75681382010-08-19 15:07:18 +01001399
1400 static inline void EvacuateFixedArray(Map* map,
1401 HeapObject** slot,
1402 HeapObject* object) {
1403 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1404 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1405 slot,
1406 object,
1407 object_size);
1408 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001409
1410
Iain Merrick75681382010-08-19 15:07:18 +01001411 static inline void EvacuateByteArray(Map* map,
1412 HeapObject** slot,
1413 HeapObject* object) {
1414 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1415 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1416 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001417
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001418
Iain Merrick75681382010-08-19 15:07:18 +01001419 static inline void EvacuateSeqAsciiString(Map* map,
1420 HeapObject** slot,
1421 HeapObject* object) {
1422 int object_size = SeqAsciiString::cast(object)->
1423 SeqAsciiStringSize(map->instance_type());
1424 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1425 }
1426
1427
1428 static inline void EvacuateSeqTwoByteString(Map* map,
1429 HeapObject** slot,
1430 HeapObject* object) {
1431 int object_size = SeqTwoByteString::cast(object)->
1432 SeqTwoByteStringSize(map->instance_type());
1433 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1434 }
1435
1436
1437 static inline bool IsShortcutCandidate(int type) {
1438 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1439 }
1440
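  // Note: a cons string whose second part is the empty string can be
  // "shortcut": the slot is updated to point at the first part directly,
  // so no new cons string has to be allocated in the target space.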
1441 static inline void EvacuateShortcutCandidate(Map* map,
1442 HeapObject** slot,
1443 HeapObject* object) {
1444 ASSERT(IsShortcutCandidate(map->instance_type()));
1445
1446 if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
1447 HeapObject* first =
1448 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1449
1450 *slot = first;
1451
1452 if (!Heap::InNewSpace(first)) {
1453 object->set_map_word(MapWord::FromForwardingAddress(first));
1454 return;
1455 }
1456
1457 MapWord first_word = first->map_word();
1458 if (first_word.IsForwardingAddress()) {
1459 HeapObject* target = first_word.ToForwardingAddress();
1460
1461 *slot = target;
1462 object->set_map_word(MapWord::FromForwardingAddress(target));
1463 return;
1464 }
1465
1466 Scavenge(first->map(), slot, first);
1467 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1468 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001469 }
Iain Merrick75681382010-08-19 15:07:18 +01001470
1471 int object_size = ConsString::kSize;
1472 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001473 }
1474
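  // Note: this strategy offers two entry points: VisitSpecialized, for
  // objects whose size is known statically, and Visit, which reads the
  // instance size from the map at runtime. Both assume the object fits
  // on a regular page (SMALL size restriction).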
Iain Merrick75681382010-08-19 15:07:18 +01001475 template<ObjectContents object_contents>
1476 class ObjectEvacuationStrategy {
1477 public:
1478 template<int object_size>
1479 static inline void VisitSpecialized(Map* map,
1480 HeapObject** slot,
1481 HeapObject* object) {
1482 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1483 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001484
Iain Merrick75681382010-08-19 15:07:18 +01001485 static inline void Visit(Map* map,
1486 HeapObject** slot,
1487 HeapObject* object) {
1488 int object_size = map->instance_size();
1489 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1490 }
1491 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001492
Iain Merrick75681382010-08-19 15:07:18 +01001493 typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001494
Iain Merrick75681382010-08-19 15:07:18 +01001495 static VisitorDispatchTable<Callback> table_;
1496};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001497
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001498
Iain Merrick75681382010-08-19 15:07:18 +01001499VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001500
1501
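// Note: this slow path is only reached for objects in from-space that
// have not been forwarded yet; it simply dispatches to the evacuation
// routine registered for the object's map.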
1502void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1503 ASSERT(InFromSpace(object));
1504 MapWord first_word = object->map_word();
1505 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001506 Map* map = first_word.ToMap();
Iain Merrick75681382010-08-19 15:07:18 +01001507 ScavengingVisitor::Scavenge(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001508}
1509
1510
1511void Heap::ScavengePointer(HeapObject** p) {
1512 ScavengeObject(p, *p);
1513}
1514
1515
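// Note: a "partial" map fills in only the fields required while the heap
// is still bootstrapping; fields such as the instance descriptors, code
// cache, prototype and constructor are patched later in
// CreateInitialMaps() once the empty fixed array and the empty
// descriptor array have been allocated.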
John Reck59135872010-11-02 12:39:01 -07001516MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1517 int instance_size) {
1518 Object* result;
1519 { MaybeObject* maybe_result = AllocateRawMap();
1520 if (!maybe_result->ToObject(&result)) return maybe_result;
1521 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001522
1523 // Map::cast cannot be used due to uninitialized map field.
1524 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1525 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1526 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001527 reinterpret_cast<Map*>(result)->
Iain Merrick75681382010-08-19 15:07:18 +01001528 set_visitor_id(
1529 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001530 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001531 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001532 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001533 reinterpret_cast<Map*>(result)->set_bit_field(0);
1534 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001535 return result;
1536}
1537
1538
John Reck59135872010-11-02 12:39:01 -07001539MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1540 Object* result;
1541 { MaybeObject* maybe_result = AllocateRawMap();
1542 if (!maybe_result->ToObject(&result)) return maybe_result;
1543 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001544
1545 Map* map = reinterpret_cast<Map*>(result);
1546 map->set_map(meta_map());
1547 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001548 map->set_visitor_id(
1549 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001550 map->set_prototype(null_value());
1551 map->set_constructor(null_value());
1552 map->set_instance_size(instance_size);
1553 map->set_inobject_properties(0);
1554 map->set_pre_allocated_property_fields(0);
1555 map->set_instance_descriptors(empty_descriptor_array());
1556 map->set_code_cache(empty_fixed_array());
1557 map->set_unused_property_fields(0);
1558 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001559 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001560
1561  // If the map object is aligned, fill the padding area with Smi 0 objects.
1562 if (Map::kPadStart < Map::kSize) {
1563 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1564 0,
1565 Map::kSize - Map::kPadStart);
1566 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001567 return map;
1568}
1569
1570
John Reck59135872010-11-02 12:39:01 -07001571MaybeObject* Heap::AllocateCodeCache() {
1572 Object* result;
1573 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1574 if (!maybe_result->ToObject(&result)) return maybe_result;
1575 }
Steve Block6ded16b2010-05-10 14:33:55 +01001576 CodeCache* code_cache = CodeCache::cast(result);
1577 code_cache->set_default_cache(empty_fixed_array());
1578 code_cache->set_normal_type_cache(undefined_value());
1579 return code_cache;
1580}
1581
1582
Steve Blocka7e24c12009-10-30 11:49:00 +00001583const Heap::StringTypeTable Heap::string_type_table[] = {
1584#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1585 {type, size, k##camel_name##MapRootIndex},
1586 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1587#undef STRING_TYPE_ELEMENT
1588};
1589
1590
1591const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1592#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1593 {contents, k##name##RootIndex},
1594 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1595#undef CONSTANT_SYMBOL_ELEMENT
1596};
1597
1598
1599const Heap::StructTable Heap::struct_table[] = {
1600#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1601 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1602 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1603#undef STRUCT_TABLE_ELEMENT
1604};
1605
1606
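// Note: the allocation order below matters. The meta map is created
// first from a partial map and made its own map; the fixed array and
// oddball maps, the empty fixed array, the null value and the empty
// descriptor array follow. Only then can the maps allocated so far have
// their descriptors, code caches, prototypes and constructors fixed up.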
1607bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001608 Object* obj;
1609 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1610 if (!maybe_obj->ToObject(&obj)) return false;
1611 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001612 // Map::cast cannot be used due to uninitialized map field.
1613 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1614 set_meta_map(new_meta_map);
1615 new_meta_map->set_map(new_meta_map);
1616
John Reck59135872010-11-02 12:39:01 -07001617 { MaybeObject* maybe_obj =
1618 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1619 if (!maybe_obj->ToObject(&obj)) return false;
1620 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001621 set_fixed_array_map(Map::cast(obj));
1622
John Reck59135872010-11-02 12:39:01 -07001623 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1624 if (!maybe_obj->ToObject(&obj)) return false;
1625 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001626 set_oddball_map(Map::cast(obj));
1627
Steve Block6ded16b2010-05-10 14:33:55 +01001628 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001629 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1630 if (!maybe_obj->ToObject(&obj)) return false;
1631 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001632 set_empty_fixed_array(FixedArray::cast(obj));
1633
John Reck59135872010-11-02 12:39:01 -07001634 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1635 if (!maybe_obj->ToObject(&obj)) return false;
1636 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001637 set_null_value(obj);
1638
1639 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001640 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1641 if (!maybe_obj->ToObject(&obj)) return false;
1642 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001643 set_empty_descriptor_array(DescriptorArray::cast(obj));
1644
1645 // Fix the instance_descriptors for the existing maps.
1646 meta_map()->set_instance_descriptors(empty_descriptor_array());
1647 meta_map()->set_code_cache(empty_fixed_array());
1648
1649 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1650 fixed_array_map()->set_code_cache(empty_fixed_array());
1651
1652 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1653 oddball_map()->set_code_cache(empty_fixed_array());
1654
1655 // Fix prototype object for existing maps.
1656 meta_map()->set_prototype(null_value());
1657 meta_map()->set_constructor(null_value());
1658
1659 fixed_array_map()->set_prototype(null_value());
1660 fixed_array_map()->set_constructor(null_value());
1661
1662 oddball_map()->set_prototype(null_value());
1663 oddball_map()->set_constructor(null_value());
1664
John Reck59135872010-11-02 12:39:01 -07001665 { MaybeObject* maybe_obj =
1666 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1667 if (!maybe_obj->ToObject(&obj)) return false;
1668 }
Iain Merrick75681382010-08-19 15:07:18 +01001669 set_fixed_cow_array_map(Map::cast(obj));
1670 ASSERT(fixed_array_map() != fixed_cow_array_map());
1671
John Reck59135872010-11-02 12:39:01 -07001672 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1673 if (!maybe_obj->ToObject(&obj)) return false;
1674 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001675 set_heap_number_map(Map::cast(obj));
1676
John Reck59135872010-11-02 12:39:01 -07001677 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1678 if (!maybe_obj->ToObject(&obj)) return false;
1679 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001680 set_proxy_map(Map::cast(obj));
1681
1682 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1683 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001684 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1685 if (!maybe_obj->ToObject(&obj)) return false;
1686 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001687 roots_[entry.index] = Map::cast(obj);
1688 }
1689
John Reck59135872010-11-02 12:39:01 -07001690 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1691 if (!maybe_obj->ToObject(&obj)) return false;
1692 }
Steve Blockd0582a62009-12-15 09:54:21 +00001693 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001694 Map::cast(obj)->set_is_undetectable();
1695
John Reck59135872010-11-02 12:39:01 -07001696 { MaybeObject* maybe_obj =
1697 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1698 if (!maybe_obj->ToObject(&obj)) return false;
1699 }
Steve Blockd0582a62009-12-15 09:54:21 +00001700 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001701 Map::cast(obj)->set_is_undetectable();
1702
John Reck59135872010-11-02 12:39:01 -07001703 { MaybeObject* maybe_obj =
1704 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1705 if (!maybe_obj->ToObject(&obj)) return false;
1706 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001707 set_byte_array_map(Map::cast(obj));
1708
Ben Murdochb0fe1622011-05-05 13:52:32 +01001709 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1710 if (!maybe_obj->ToObject(&obj)) return false;
1711 }
1712 set_empty_byte_array(ByteArray::cast(obj));
1713
John Reck59135872010-11-02 12:39:01 -07001714 { MaybeObject* maybe_obj =
1715 AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
1716 if (!maybe_obj->ToObject(&obj)) return false;
1717 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001718 set_pixel_array_map(Map::cast(obj));
1719
John Reck59135872010-11-02 12:39:01 -07001720 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1721 ExternalArray::kAlignedSize);
1722 if (!maybe_obj->ToObject(&obj)) return false;
1723 }
Steve Block3ce2e202009-11-05 08:53:23 +00001724 set_external_byte_array_map(Map::cast(obj));
1725
John Reck59135872010-11-02 12:39:01 -07001726 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1727 ExternalArray::kAlignedSize);
1728 if (!maybe_obj->ToObject(&obj)) return false;
1729 }
Steve Block3ce2e202009-11-05 08:53:23 +00001730 set_external_unsigned_byte_array_map(Map::cast(obj));
1731
John Reck59135872010-11-02 12:39:01 -07001732 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1733 ExternalArray::kAlignedSize);
1734 if (!maybe_obj->ToObject(&obj)) return false;
1735 }
Steve Block3ce2e202009-11-05 08:53:23 +00001736 set_external_short_array_map(Map::cast(obj));
1737
John Reck59135872010-11-02 12:39:01 -07001738 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1739 ExternalArray::kAlignedSize);
1740 if (!maybe_obj->ToObject(&obj)) return false;
1741 }
Steve Block3ce2e202009-11-05 08:53:23 +00001742 set_external_unsigned_short_array_map(Map::cast(obj));
1743
John Reck59135872010-11-02 12:39:01 -07001744 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1745 ExternalArray::kAlignedSize);
1746 if (!maybe_obj->ToObject(&obj)) return false;
1747 }
Steve Block3ce2e202009-11-05 08:53:23 +00001748 set_external_int_array_map(Map::cast(obj));
1749
John Reck59135872010-11-02 12:39:01 -07001750 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1751 ExternalArray::kAlignedSize);
1752 if (!maybe_obj->ToObject(&obj)) return false;
1753 }
Steve Block3ce2e202009-11-05 08:53:23 +00001754 set_external_unsigned_int_array_map(Map::cast(obj));
1755
John Reck59135872010-11-02 12:39:01 -07001756 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1757 ExternalArray::kAlignedSize);
1758 if (!maybe_obj->ToObject(&obj)) return false;
1759 }
Steve Block3ce2e202009-11-05 08:53:23 +00001760 set_external_float_array_map(Map::cast(obj));
1761
John Reck59135872010-11-02 12:39:01 -07001762 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1763 if (!maybe_obj->ToObject(&obj)) return false;
1764 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001765 set_code_map(Map::cast(obj));
1766
John Reck59135872010-11-02 12:39:01 -07001767 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1768 JSGlobalPropertyCell::kSize);
1769 if (!maybe_obj->ToObject(&obj)) return false;
1770 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001771 set_global_property_cell_map(Map::cast(obj));
1772
John Reck59135872010-11-02 12:39:01 -07001773 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1774 if (!maybe_obj->ToObject(&obj)) return false;
1775 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001776 set_one_pointer_filler_map(Map::cast(obj));
1777
John Reck59135872010-11-02 12:39:01 -07001778 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1779 if (!maybe_obj->ToObject(&obj)) return false;
1780 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001781 set_two_pointer_filler_map(Map::cast(obj));
1782
1783 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1784 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001785 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1786 if (!maybe_obj->ToObject(&obj)) return false;
1787 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001788 roots_[entry.index] = Map::cast(obj);
1789 }
1790
John Reck59135872010-11-02 12:39:01 -07001791 { MaybeObject* maybe_obj =
1792 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1793 if (!maybe_obj->ToObject(&obj)) return false;
1794 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001795 set_hash_table_map(Map::cast(obj));
1796
John Reck59135872010-11-02 12:39:01 -07001797 { MaybeObject* maybe_obj =
1798 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1799 if (!maybe_obj->ToObject(&obj)) return false;
1800 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001801 set_context_map(Map::cast(obj));
1802
John Reck59135872010-11-02 12:39:01 -07001803 { MaybeObject* maybe_obj =
1804 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1805 if (!maybe_obj->ToObject(&obj)) return false;
1806 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001807 set_catch_context_map(Map::cast(obj));
1808
John Reck59135872010-11-02 12:39:01 -07001809 { MaybeObject* maybe_obj =
1810 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1811 if (!maybe_obj->ToObject(&obj)) return false;
1812 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001813 Map* global_context_map = Map::cast(obj);
1814 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1815 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001816
John Reck59135872010-11-02 12:39:01 -07001817 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1818 SharedFunctionInfo::kAlignedSize);
1819 if (!maybe_obj->ToObject(&obj)) return false;
1820 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001821 set_shared_function_info_map(Map::cast(obj));
1822
1823 ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
1824 return true;
1825}
1826
1827
John Reck59135872010-11-02 12:39:01 -07001828MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001829 // Statically ensure that it is safe to allocate heap numbers in paged
1830 // spaces.
1831 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1832 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1833
John Reck59135872010-11-02 12:39:01 -07001834 Object* result;
1835 { MaybeObject* maybe_result =
1836 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1837 if (!maybe_result->ToObject(&result)) return maybe_result;
1838 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001839
1840 HeapObject::cast(result)->set_map(heap_number_map());
1841 HeapNumber::cast(result)->set_value(value);
1842 return result;
1843}
1844
1845
John Reck59135872010-11-02 12:39:01 -07001846MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001847 // Use general version, if we're forced to always allocate.
1848 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1849
1850 // This version of AllocateHeapNumber is optimized for
1851 // allocation in new space.
1852 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1853 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001854 Object* result;
1855 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1856 if (!maybe_result->ToObject(&result)) return maybe_result;
1857 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001858 HeapObject::cast(result)->set_map(heap_number_map());
1859 HeapNumber::cast(result)->set_value(value);
1860 return result;
1861}
1862
1863
John Reck59135872010-11-02 12:39:01 -07001864MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1865 Object* result;
1866 { MaybeObject* maybe_result = AllocateRawCell();
1867 if (!maybe_result->ToObject(&result)) return maybe_result;
1868 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001869 HeapObject::cast(result)->set_map(global_property_cell_map());
1870 JSGlobalPropertyCell::cast(result)->set_value(value);
1871 return result;
1872}
1873
1874
John Reck59135872010-11-02 12:39:01 -07001875MaybeObject* Heap::CreateOddball(const char* to_string,
1876 Object* to_number) {
1877 Object* result;
1878 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1879 if (!maybe_result->ToObject(&result)) return maybe_result;
1880 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001881 return Oddball::cast(result)->Initialize(to_string, to_number);
1882}
1883
1884
1885bool Heap::CreateApiObjects() {
1886 Object* obj;
1887
John Reck59135872010-11-02 12:39:01 -07001888 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1889 if (!maybe_obj->ToObject(&obj)) return false;
1890 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001891 set_neander_map(Map::cast(obj));
1892
John Reck59135872010-11-02 12:39:01 -07001893 { MaybeObject* maybe_obj = Heap::AllocateJSObjectFromMap(neander_map());
1894 if (!maybe_obj->ToObject(&obj)) return false;
1895 }
1896 Object* elements;
1897 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1898 if (!maybe_elements->ToObject(&elements)) return false;
1899 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001900 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1901 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1902 set_message_listeners(JSObject::cast(obj));
1903
1904 return true;
1905}
1906
1907
1908void Heap::CreateCEntryStub() {
1909 CEntryStub stub(1);
1910 set_c_entry_code(*stub.GetCode());
1911}
1912
1913
Steve Block6ded16b2010-05-10 14:33:55 +01001914#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001915void Heap::CreateRegExpCEntryStub() {
1916 RegExpCEntryStub stub;
1917 set_re_c_entry_code(*stub.GetCode());
1918}
1919#endif
1920
1921
Steve Blocka7e24c12009-10-30 11:49:00 +00001922void Heap::CreateJSEntryStub() {
1923 JSEntryStub stub;
1924 set_js_entry_code(*stub.GetCode());
1925}
1926
1927
1928void Heap::CreateJSConstructEntryStub() {
1929 JSConstructEntryStub stub;
1930 set_js_construct_entry_code(*stub.GetCode());
1931}
1932
1933
1934void Heap::CreateFixedStubs() {
1935 // Here we create roots for fixed stubs. They are needed at GC
1936 // for cooking and uncooking (check out frames.cc).
1937  // This eliminates the need for doing dictionary lookups in the
1938 // stub cache for these stubs.
1939 HandleScope scope;
1940 // gcc-4.4 has problem generating correct code of following snippet:
1941 // { CEntryStub stub;
1942 // c_entry_code_ = *stub.GetCode();
1943 // }
Leon Clarke4515c472010-02-03 11:58:03 +00001944 // { DebuggerStatementStub stub;
1945 // debugger_statement_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001946 // }
1947 // To workaround the problem, make separate functions without inlining.
1948 Heap::CreateCEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001949 Heap::CreateJSEntryStub();
1950 Heap::CreateJSConstructEntryStub();
Steve Block6ded16b2010-05-10 14:33:55 +01001951#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001952 Heap::CreateRegExpCEntryStub();
1953#endif
1954}
1955
1956
1957bool Heap::CreateInitialObjects() {
1958 Object* obj;
1959
1960 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001961 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1962 if (!maybe_obj->ToObject(&obj)) return false;
1963 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001964 set_minus_zero_value(obj);
1965 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1966
John Reck59135872010-11-02 12:39:01 -07001967 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1968 if (!maybe_obj->ToObject(&obj)) return false;
1969 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001970 set_nan_value(obj);
1971
John Reck59135872010-11-02 12:39:01 -07001972 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1973 if (!maybe_obj->ToObject(&obj)) return false;
1974 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001975 set_undefined_value(obj);
1976 ASSERT(!InNewSpace(undefined_value()));
1977
1978 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07001979 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
1980 if (!maybe_obj->ToObject(&obj)) return false;
1981 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001982 // Don't use set_symbol_table() due to asserts.
1983 roots_[kSymbolTableRootIndex] = obj;
1984
1985  // Assign the print strings for oddballs after creating the symbol table.
John Reck59135872010-11-02 12:39:01 -07001986 Object* symbol;
1987 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
1988 if (!maybe_symbol->ToObject(&symbol)) return false;
1989 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001990 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
1991 Oddball::cast(undefined_value())->set_to_number(nan_value());
1992
Steve Blocka7e24c12009-10-30 11:49:00 +00001993 // Allocate the null_value
John Reck59135872010-11-02 12:39:01 -07001994 { MaybeObject* maybe_obj =
1995 Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
1996 if (!maybe_obj->ToObject(&obj)) return false;
1997 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001998
John Reck59135872010-11-02 12:39:01 -07001999 { MaybeObject* maybe_obj = CreateOddball("true", Smi::FromInt(1));
2000 if (!maybe_obj->ToObject(&obj)) return false;
2001 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002002 set_true_value(obj);
2003
John Reck59135872010-11-02 12:39:01 -07002004 { MaybeObject* maybe_obj = CreateOddball("false", Smi::FromInt(0));
2005 if (!maybe_obj->ToObject(&obj)) return false;
2006 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002007 set_false_value(obj);
2008
John Reck59135872010-11-02 12:39:01 -07002009 { MaybeObject* maybe_obj = CreateOddball("hole", Smi::FromInt(-1));
2010 if (!maybe_obj->ToObject(&obj)) return false;
2011 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002012 set_the_hole_value(obj);
2013
Ben Murdoch086aeea2011-05-13 15:57:08 +01002014 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2015 Smi::FromInt(-4));
2016 if (!maybe_obj->ToObject(&obj)) return false;
2017 }
2018 set_arguments_marker(obj);
2019
John Reck59135872010-11-02 12:39:01 -07002020 { MaybeObject* maybe_obj =
2021 CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
2022 if (!maybe_obj->ToObject(&obj)) return false;
2023 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002024 set_no_interceptor_result_sentinel(obj);
2025
John Reck59135872010-11-02 12:39:01 -07002026 { MaybeObject* maybe_obj =
2027 CreateOddball("termination_exception", Smi::FromInt(-3));
2028 if (!maybe_obj->ToObject(&obj)) return false;
2029 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002030 set_termination_exception(obj);
2031
2032 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07002033 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2034 if (!maybe_obj->ToObject(&obj)) return false;
2035 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002036 set_empty_string(String::cast(obj));
2037
2038 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07002039 { MaybeObject* maybe_obj =
2040 LookupAsciiSymbol(constant_symbol_table[i].contents);
2041 if (!maybe_obj->ToObject(&obj)) return false;
2042 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002043 roots_[constant_symbol_table[i].index] = String::cast(obj);
2044 }
2045
2046 // Allocate the hidden symbol which is used to identify the hidden properties
2047 // in JSObjects. The hash code has a special value so that it will not match
2048 // the empty string when searching for the property. It cannot be part of the
2049 // loop above because it needs to be allocated manually with the special
2050 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2051 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07002052 { MaybeObject* maybe_obj =
2053 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2054 if (!maybe_obj->ToObject(&obj)) return false;
2055 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002056 hidden_symbol_ = String::cast(obj);
2057
2058 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07002059 { MaybeObject* maybe_obj =
2060 AllocateProxy((Address) &Accessors::ObjectPrototype);
2061 if (!maybe_obj->ToObject(&obj)) return false;
2062 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002063 set_prototype_accessors(Proxy::cast(obj));
2064
2065 // Allocate the code_stubs dictionary. The initial size is set to avoid
2066 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002067 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2068 if (!maybe_obj->ToObject(&obj)) return false;
2069 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002070 set_code_stubs(NumberDictionary::cast(obj));
2071
2072 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2073 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002074 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2075 if (!maybe_obj->ToObject(&obj)) return false;
2076 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002077 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2078
Kristian Monsen25f61362010-05-21 11:50:48 +01002079 set_instanceof_cache_function(Smi::FromInt(0));
2080 set_instanceof_cache_map(Smi::FromInt(0));
2081 set_instanceof_cache_answer(Smi::FromInt(0));
2082
Steve Blocka7e24c12009-10-30 11:49:00 +00002083 CreateFixedStubs();
2084
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002085 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002086 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2087 if (!maybe_obj->ToObject(&obj)) return false;
2088 }
2089 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(obj);
2090 if (!maybe_obj->ToObject(&obj)) return false;
2091 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002092 set_intrinsic_function_names(StringDictionary::cast(obj));
2093
Leon Clarkee46be812010-01-19 14:06:41 +00002094 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002095
Steve Block6ded16b2010-05-10 14:33:55 +01002096 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002097 { MaybeObject* maybe_obj =
2098 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2099 if (!maybe_obj->ToObject(&obj)) return false;
2100 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002101 set_single_character_string_cache(FixedArray::cast(obj));
2102
2103 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002104 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2105 if (!maybe_obj->ToObject(&obj)) return false;
2106 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002107 set_natives_source_cache(FixedArray::cast(obj));
2108
2109 // Handling of script id generation is in Factory::NewScript.
2110 set_last_script_id(undefined_value());
2111
2112 // Initialize keyed lookup cache.
2113 KeyedLookupCache::Clear();
2114
2115 // Initialize context slot cache.
2116 ContextSlotCache::Clear();
2117
2118 // Initialize descriptor cache.
2119 DescriptorLookupCache::Clear();
2120
2121 // Initialize compilation cache.
2122 CompilationCache::Clear();
2123
2124 return true;
2125}
2126
2127
John Reck59135872010-11-02 12:39:01 -07002128MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002129 // Compute the size of the number string cache based on the max heap size.
2130 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2131 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2132 int number_string_cache_size = max_semispace_size_ / 512;
2133 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
John Reck59135872010-11-02 12:39:01 -07002134 Object* obj;
2135 MaybeObject* maybe_obj =
2136 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2137 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2138 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002139}
2140
2141
2142void Heap::FlushNumberStringCache() {
2143 // Flush the number to string cache.
2144 int len = number_string_cache()->length();
2145 for (int i = 0; i < len; i++) {
2146 number_string_cache()->set_undefined(i);
2147 }
2148}
2149
2150
Steve Blocka7e24c12009-10-30 11:49:00 +00002151static inline int double_get_hash(double d) {
2152 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002153 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002154}
2155
2156
2157static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002158 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002159}
2160
2161
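// Note: the number-string cache is a flat FixedArray of key/value pairs;
// a number hashes to slot 2 * hash (the key) and 2 * hash + 1 (the
// cached string). Masking with (length / 2 - 1) assumes the pair count
// is a power of two, which the sizing in InitializeNumberStringCache is
// expected to yield for the usual power-of-two semispace sizes.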
Steve Blocka7e24c12009-10-30 11:49:00 +00002162Object* Heap::GetNumberStringCache(Object* number) {
2163 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002164 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002165 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002166 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002167 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002168 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002169 }
2170 Object* key = number_string_cache()->get(hash * 2);
2171 if (key == number) {
2172 return String::cast(number_string_cache()->get(hash * 2 + 1));
2173 } else if (key->IsHeapNumber() &&
2174 number->IsHeapNumber() &&
2175 key->Number() == number->Number()) {
2176 return String::cast(number_string_cache()->get(hash * 2 + 1));
2177 }
2178 return undefined_value();
2179}
2180
2181
2182void Heap::SetNumberStringCache(Object* number, String* string) {
2183 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002184 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002185 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002186 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002187 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002188 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002189 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002190 number_string_cache()->set(hash * 2, number);
2191 }
2192 number_string_cache()->set(hash * 2 + 1, string);
2193}
2194
2195
John Reck59135872010-11-02 12:39:01 -07002196MaybeObject* Heap::NumberToString(Object* number,
2197 bool check_number_string_cache) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002198 Counters::number_to_string_runtime.Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002199 if (check_number_string_cache) {
2200 Object* cached = GetNumberStringCache(number);
2201 if (cached != undefined_value()) {
2202 return cached;
2203 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002204 }
2205
2206 char arr[100];
2207 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2208 const char* str;
2209 if (number->IsSmi()) {
2210 int num = Smi::cast(number)->value();
2211 str = IntToCString(num, buffer);
2212 } else {
2213 double num = HeapNumber::cast(number)->value();
2214 str = DoubleToCString(num, buffer);
2215 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002216
John Reck59135872010-11-02 12:39:01 -07002217 Object* js_string;
2218 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2219 if (maybe_js_string->ToObject(&js_string)) {
2220 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002221 }
John Reck59135872010-11-02 12:39:01 -07002222 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002223}
2224
2225
Steve Block3ce2e202009-11-05 08:53:23 +00002226Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2227 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2228}
2229
2230
2231Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2232 ExternalArrayType array_type) {
2233 switch (array_type) {
2234 case kExternalByteArray:
2235 return kExternalByteArrayMapRootIndex;
2236 case kExternalUnsignedByteArray:
2237 return kExternalUnsignedByteArrayMapRootIndex;
2238 case kExternalShortArray:
2239 return kExternalShortArrayMapRootIndex;
2240 case kExternalUnsignedShortArray:
2241 return kExternalUnsignedShortArrayMapRootIndex;
2242 case kExternalIntArray:
2243 return kExternalIntArrayMapRootIndex;
2244 case kExternalUnsignedIntArray:
2245 return kExternalUnsignedIntArrayMapRootIndex;
2246 case kExternalFloatArray:
2247 return kExternalFloatArrayMapRootIndex;
2248 default:
2249 UNREACHABLE();
2250 return kUndefinedValueRootIndex;
2251 }
2252}
2253
2254
John Reck59135872010-11-02 12:39:01 -07002255MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002256 // We need to distinguish the minus zero value and this cannot be
2257 // done after conversion to int. Doing this by comparing bit
2258 // patterns is faster than using fpclassify() et al.
2259 static const DoubleRepresentation minus_zero(-0.0);
2260
2261 DoubleRepresentation rep(value);
2262 if (rep.bits == minus_zero.bits) {
2263 return AllocateHeapNumber(-0.0, pretenure);
2264 }
2265
2266 int int_value = FastD2I(value);
2267 if (value == int_value && Smi::IsValid(int_value)) {
2268 return Smi::FromInt(int_value);
2269 }
2270
2271 // Materialize the value in the heap.
2272 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002273}
2274
2275
John Reck59135872010-11-02 12:39:01 -07002276MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002277 // Statically ensure that it is safe to allocate proxies in paged spaces.
2278 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2279 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002280 Object* result;
2281 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2282 if (!maybe_result->ToObject(&result)) return maybe_result;
2283 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002284
2285 Proxy::cast(result)->set_proxy(proxy);
2286 return result;
2287}
2288
2289
John Reck59135872010-11-02 12:39:01 -07002290MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2291 Object* result;
2292 { MaybeObject* maybe_result =
2293 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2294 if (!maybe_result->ToObject(&result)) return maybe_result;
2295 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002296
2297 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2298 share->set_name(name);
2299 Code* illegal = Builtins::builtin(Builtins::Illegal);
2300 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002301 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Blocka7e24c12009-10-30 11:49:00 +00002302 Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
2303 share->set_construct_stub(construct_stub);
2304 share->set_expected_nof_properties(0);
2305 share->set_length(0);
2306 share->set_formal_parameter_count(0);
2307 share->set_instance_class_name(Object_symbol());
2308 share->set_function_data(undefined_value());
2309 share->set_script(undefined_value());
2310 share->set_start_position_and_type(0);
2311 share->set_debug_info(undefined_value());
2312 share->set_inferred_name(empty_string());
2313 share->set_compiler_hints(0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002314 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002315 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002316 share->set_this_property_assignments_count(0);
2317 share->set_this_property_assignments(undefined_value());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002318 share->set_opt_count(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002319 share->set_num_literals(0);
2320 share->set_end_position(0);
2321 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002322 return result;
2323}
2324
2325
Steve Blockd0582a62009-12-15 09:54:21 +00002326// Returns true for a character in a range. Both limits are inclusive.
2327static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2328  // This makes use of unsigned wraparound: if character < from, the difference wraps to a large value and the test fails.
2329 return character - from <= to - from;
2330}
2331
2332
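// Note: helper shared by AllocateConsString and AllocateSubString. For
// two-character results it first consults the symbol table so that
// frequently rebuilt short strings are shared rather than reallocated;
// purely numeric pairs are skipped because they use a different hash.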
John Reck59135872010-11-02 12:39:01 -07002333MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2334 uint32_t c1,
2335 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002336 String* symbol;
2337 // Numeric strings have a different hash algorithm not known by
2338 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2339 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2340 Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2341 return symbol;
2342 // Now we know the length is 2, we might as well make use of that fact
2343 // when building the new string.
2344 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2345 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002346 Object* result;
2347 { MaybeObject* maybe_result = Heap::AllocateRawAsciiString(2);
2348 if (!maybe_result->ToObject(&result)) return maybe_result;
2349 }
Steve Blockd0582a62009-12-15 09:54:21 +00002350 char* dest = SeqAsciiString::cast(result)->GetChars();
2351 dest[0] = c1;
2352 dest[1] = c2;
2353 return result;
2354 } else {
John Reck59135872010-11-02 12:39:01 -07002355 Object* result;
2356 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(2);
2357 if (!maybe_result->ToObject(&result)) return maybe_result;
2358 }
Steve Blockd0582a62009-12-15 09:54:21 +00002359 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2360 dest[0] = c1;
2361 dest[1] = c2;
2362 return result;
2363 }
2364}
2365
2366
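// Note: short results (length < String::kMinNonFlatLength) are copied
// into a fresh flat sequential string; longer results get a ConsString
// that merely points at both halves. When every character of a two-byte
// input pair fits in ASCII, the ASCII variants are used to save memory.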
John Reck59135872010-11-02 12:39:01 -07002367MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002368 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002369 if (first_length == 0) {
2370 return second;
2371 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002372
2373 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002374 if (second_length == 0) {
2375 return first;
2376 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002377
2378 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002379
2380 // Optimization for 2-byte strings often used as keys in a decompression
2381 // dictionary. Check whether we already have the string in the symbol
2382  // table to prevent creation of many unnecessary strings.
2383 if (length == 2) {
2384 unsigned c1 = first->Get(0);
2385 unsigned c2 = second->Get(0);
2386 return MakeOrFindTwoCharacterString(c1, c2);
2387 }
2388
Steve Block6ded16b2010-05-10 14:33:55 +01002389 bool first_is_ascii = first->IsAsciiRepresentation();
2390 bool second_is_ascii = second->IsAsciiRepresentation();
2391 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002392
2393 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002394 // of the new cons string is too large.
2395 if (length > String::kMaxLength || length < 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002396 Top::context()->mark_out_of_memory();
2397 return Failure::OutOfMemoryException();
2398 }
2399
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002400 bool is_ascii_data_in_two_byte_string = false;
2401 if (!is_ascii) {
2402 // At least one of the strings uses two-byte representation so we
2403 // can't use the fast case code for short ascii strings below, but
2404 // we can try to save memory if all chars actually fit in ascii.
2405 is_ascii_data_in_two_byte_string =
2406 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2407 if (is_ascii_data_in_two_byte_string) {
2408 Counters::string_add_runtime_ext_to_ascii.Increment();
2409 }
2410 }
2411
Steve Blocka7e24c12009-10-30 11:49:00 +00002412 // If the resulting string is small make a flat string.
2413 if (length < String::kMinNonFlatLength) {
2414 ASSERT(first->IsFlat());
2415 ASSERT(second->IsFlat());
2416 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002417 Object* result;
2418 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2419 if (!maybe_result->ToObject(&result)) return maybe_result;
2420 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002421 // Copy the characters into the new object.
2422 char* dest = SeqAsciiString::cast(result)->GetChars();
2423 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002424 const char* src;
2425 if (first->IsExternalString()) {
2426 src = ExternalAsciiString::cast(first)->resource()->data();
2427 } else {
2428 src = SeqAsciiString::cast(first)->GetChars();
2429 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002430 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2431 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002432 if (second->IsExternalString()) {
2433 src = ExternalAsciiString::cast(second)->resource()->data();
2434 } else {
2435 src = SeqAsciiString::cast(second)->GetChars();
2436 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002437 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2438 return result;
2439 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002440 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002441 Object* result;
2442 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2443 if (!maybe_result->ToObject(&result)) return maybe_result;
2444 }
Steve Block6ded16b2010-05-10 14:33:55 +01002445 // Copy the characters into the new object.
2446 char* dest = SeqAsciiString::cast(result)->GetChars();
2447 String::WriteToFlat(first, dest, 0, first_length);
2448 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block6ded16b2010-05-10 14:33:55 +01002449 return result;
2450 }
2451
John Reck59135872010-11-02 12:39:01 -07002452 Object* result;
2453 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2454 if (!maybe_result->ToObject(&result)) return maybe_result;
2455 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002456 // Copy the characters into the new object.
2457 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2458 String::WriteToFlat(first, dest, 0, first_length);
2459 String::WriteToFlat(second, dest + first_length, 0, second_length);
2460 return result;
2461 }
2462 }
2463
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002464 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2465 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002466
John Reck59135872010-11-02 12:39:01 -07002467 Object* result;
2468 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2469 if (!maybe_result->ToObject(&result)) return maybe_result;
2470 }
Leon Clarke4515c472010-02-03 11:58:03 +00002471
2472 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002473 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002474 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002475 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002476 cons_string->set_hash_field(String::kEmptyHashField);
2477 cons_string->set_first(first, mode);
2478 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002479 return result;
2480}
2481
2482
John Reck59135872010-11-02 12:39:01 -07002483MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002484 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002485 int end,
2486 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002487 int length = end - start;
2488
2489 if (length == 1) {
2490 return Heap::LookupSingleCharacterStringFromCode(
2491 buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002492 } else if (length == 2) {
2493 // Optimization for 2-byte strings often used as keys in a decompression
2494 // dictionary. Check whether we already have the string in the symbol
2495  // table to prevent creation of many unnecessary strings.
2496 unsigned c1 = buffer->Get(start);
2497 unsigned c2 = buffer->Get(start + 1);
2498 return MakeOrFindTwoCharacterString(c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002499 }
2500
2501 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002502 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002503
John Reck59135872010-11-02 12:39:01 -07002504 Object* result;
2505 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2506        ? AllocateRawAsciiString(length, pretenure)
2507 : AllocateRawTwoByteString(length, pretenure);
2508 if (!maybe_result->ToObject(&result)) return maybe_result;
2509 }
Steve Blockd0582a62009-12-15 09:54:21 +00002510 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002511 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002512 if (buffer->IsAsciiRepresentation()) {
2513 ASSERT(string_result->IsAsciiRepresentation());
2514 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2515 String::WriteToFlat(buffer, dest, start, end);
2516 } else {
2517 ASSERT(string_result->IsTwoByteRepresentation());
2518 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2519 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002520 }
Steve Blockd0582a62009-12-15 09:54:21 +00002521
Steve Blocka7e24c12009-10-30 11:49:00 +00002522 return result;
2523}
2524
2525
John Reck59135872010-11-02 12:39:01 -07002526MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002527 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002528 size_t length = resource->length();
2529 if (length > static_cast<size_t>(String::kMaxLength)) {
2530 Top::context()->mark_out_of_memory();
2531 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002532 }
2533
Steve Blockd0582a62009-12-15 09:54:21 +00002534 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002535 Object* result;
2536 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2537 if (!maybe_result->ToObject(&result)) return maybe_result;
2538 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002539
2540 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002541 external_string->set_length(static_cast<int>(length));
2542 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002543 external_string->set_resource(resource);
2544
2545 return result;
2546}
2547
2548
John Reck59135872010-11-02 12:39:01 -07002549MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002550 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002551 size_t length = resource->length();
2552 if (length > static_cast<size_t>(String::kMaxLength)) {
2553 Top::context()->mark_out_of_memory();
2554 return Failure::OutOfMemoryException();
2555 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002556
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002557 // For small strings we check whether the resource contains only
Steve Block9fac8402011-05-12 15:51:54 +01002558 // ASCII characters. If yes, we use a different string map.
2559 static const size_t kAsciiCheckLengthLimit = 32;
2560 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2561 String::IsAscii(resource->data(), static_cast<int>(length));
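  // The 32-character cap presumably keeps this check cheap for long external
  // strings; anything longer conservatively keeps the two-byte map.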
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002562 Map* map = is_ascii ?
2563 Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
John Reck59135872010-11-02 12:39:01 -07002564 Object* result;
2565 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2566 if (!maybe_result->ToObject(&result)) return maybe_result;
2567 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002568
2569 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002570 external_string->set_length(static_cast<int>(length));
2571 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002572 external_string->set_resource(resource);
2573
2574 return result;
2575}
2576
2577
John Reck59135872010-11-02 12:39:01 -07002578MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002579 if (code <= String::kMaxAsciiCharCode) {
2580 Object* value = Heap::single_character_string_cache()->get(code);
2581 if (value != Heap::undefined_value()) return value;
2582
2583 char buffer[1];
2584 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002585 Object* result;
2586 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002587
John Reck59135872010-11-02 12:39:01 -07002588 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002589 Heap::single_character_string_cache()->set(code, result);
2590 return result;
2591 }
2592
John Reck59135872010-11-02 12:39:01 -07002593 Object* result;
2594 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(1);
2595 if (!maybe_result->ToObject(&result)) return maybe_result;
2596 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002597 String* answer = String::cast(result);
2598 answer->Set(0, code);
2599 return answer;
2600}
2601
2602
John Reck59135872010-11-02 12:39:01 -07002603MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002604 if (length < 0 || length > ByteArray::kMaxLength) {
2605 return Failure::OutOfMemoryException();
2606 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002607 if (pretenure == NOT_TENURED) {
2608 return AllocateByteArray(length);
2609 }
2610 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002611 Object* result;
2612 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2613 ? old_data_space_->AllocateRaw(size)
2614 : lo_space_->AllocateRaw(size);
2615 if (!maybe_result->ToObject(&result)) return maybe_result;
2616 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002617
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002618 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2619 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002620 return result;
2621}
2622
2623
John Reck59135872010-11-02 12:39:01 -07002624MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002625 if (length < 0 || length > ByteArray::kMaxLength) {
2626 return Failure::OutOfMemoryException();
2627 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002628 int size = ByteArray::SizeFor(length);
2629 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002630 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002631 Object* result;
2632 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2633 if (!maybe_result->ToObject(&result)) return maybe_result;
2634 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002635
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002636 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2637 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002638 return result;
2639}
2640
2641
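// Overwrites [addr, addr + size) with a dead filler object so that heap
// iteration stays well-formed: a single word gets the one-pointer filler map,
// two words the two-pointer filler map, and larger holes are formatted as a
// ByteArray covering the remaining bytes.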
2642void Heap::CreateFillerObjectAt(Address addr, int size) {
2643 if (size == 0) return;
2644 HeapObject* filler = HeapObject::FromAddress(addr);
2645 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002646 filler->set_map(one_pointer_filler_map());
2647 } else if (size == 2 * kPointerSize) {
2648 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002649 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002650 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002651 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2652 }
2653}
2654
2655
John Reck59135872010-11-02 12:39:01 -07002656MaybeObject* Heap::AllocatePixelArray(int length,
Steve Blocka7e24c12009-10-30 11:49:00 +00002657 uint8_t* external_pointer,
2658 PretenureFlag pretenure) {
2659 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002660 Object* result;
2661 { MaybeObject* maybe_result =
2662 AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
2663 if (!maybe_result->ToObject(&result)) return maybe_result;
2664 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002665
2666 reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
2667 reinterpret_cast<PixelArray*>(result)->set_length(length);
2668 reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
2669
2670 return result;
2671}
2672
2673
John Reck59135872010-11-02 12:39:01 -07002674MaybeObject* Heap::AllocateExternalArray(int length,
2675 ExternalArrayType array_type,
2676 void* external_pointer,
2677 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002678 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002679 Object* result;
2680 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2681 space,
2682 OLD_DATA_SPACE);
2683 if (!maybe_result->ToObject(&result)) return maybe_result;
2684 }
Steve Block3ce2e202009-11-05 08:53:23 +00002685
2686 reinterpret_cast<ExternalArray*>(result)->set_map(
2687 MapForExternalArrayType(array_type));
2688 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2689 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2690 external_pointer);
2691
2692 return result;
2693}
2694
2695
John Reck59135872010-11-02 12:39:01 -07002696MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2697 Code::Flags flags,
2698 Handle<Object> self_reference) {
Leon Clarkeac952652010-07-15 11:15:24 +01002699 // Allocate ByteArray before the Code object, so that we do not risk
2700  // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002701 Object* reloc_info;
2702 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2703 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2704 }
Leon Clarkeac952652010-07-15 11:15:24 +01002705
Steve Blocka7e24c12009-10-30 11:49:00 +00002706 // Compute size
Leon Clarkeac952652010-07-15 11:15:24 +01002707 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002708 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002709 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002710 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002711 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002712 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002713 } else {
John Reck59135872010-11-02 12:39:01 -07002714 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002715 }
2716
John Reck59135872010-11-02 12:39:01 -07002717 Object* result;
2718 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002719
2720 // Initialize the object
2721 HeapObject::cast(result)->set_map(code_map());
2722 Code* code = Code::cast(result);
2723 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2724 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002725 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002726 code->set_flags(flags);
Ben Murdochb8e0da22011-05-16 14:20:40 +01002727 if (code->is_call_stub() || code->is_keyed_call_stub()) {
2728 code->set_check_type(RECEIVER_MAP_CHECK);
2729 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002730 code->set_deoptimization_data(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002731 // Allow self references to created code object by patching the handle to
2732 // point to the newly allocated Code object.
2733 if (!self_reference.is_null()) {
2734 *(self_reference.location()) = code;
2735 }
2736 // Migrate generated code.
2737 // The generated code can contain Object** values (typically from handles)
2738 // that are dereferenced during the copy to point directly to the actual heap
2739 // objects. These pointers can include references to the code object itself,
2740 // through the self_reference parameter.
2741 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002742
2743#ifdef DEBUG
2744 code->Verify();
2745#endif
2746 return code;
2747}
2748
2749
John Reck59135872010-11-02 12:39:01 -07002750MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002751 // Allocate an object the same size as the code object.
2752 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002753 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002754 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002755 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002756 } else {
John Reck59135872010-11-02 12:39:01 -07002757 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002758 }
2759
John Reck59135872010-11-02 12:39:01 -07002760 Object* result;
2761 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002762
2763 // Copy code object.
2764 Address old_addr = code->address();
2765 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002766 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002767 // Relocate the copy.
2768 Code* new_code = Code::cast(result);
2769 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2770 new_code->Relocate(new_addr - old_addr);
2771 return new_code;
2772}
2773
2774
John Reck59135872010-11-02 12:39:01 -07002775MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002776 // Allocate ByteArray before the Code object, so that we do not risk
2777  // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002778 Object* reloc_info_array;
2779 { MaybeObject* maybe_reloc_info_array =
2780 AllocateByteArray(reloc_info.length(), TENURED);
2781 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2782 return maybe_reloc_info_array;
2783 }
2784 }
Leon Clarkeac952652010-07-15 11:15:24 +01002785
2786 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002787
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002788 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002789
2790 Address old_addr = code->address();
2791
2792 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002793 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002794
John Reck59135872010-11-02 12:39:01 -07002795 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002796 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002797 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002798 } else {
John Reck59135872010-11-02 12:39:01 -07002799 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002800 }
2801
John Reck59135872010-11-02 12:39:01 -07002802 Object* result;
2803 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002804
2805 // Copy code object.
2806 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2807
2808 // Copy header and instructions.
2809 memcpy(new_addr, old_addr, relocation_offset);
2810
Steve Block6ded16b2010-05-10 14:33:55 +01002811 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002812 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002813
Leon Clarkeac952652010-07-15 11:15:24 +01002814 // Copy patched rinfo.
2815 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002816
2817 // Relocate the copy.
2818 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2819 new_code->Relocate(new_addr - old_addr);
2820
2821#ifdef DEBUG
2822 code->Verify();
2823#endif
2824 return new_code;
2825}
2826
2827
John Reck59135872010-11-02 12:39:01 -07002828MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002829 ASSERT(gc_state_ == NOT_IN_GC);
2830 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002831 // If allocation failures are disallowed, we may allocate in a different
2832 // space when new space is full and the object is not a large object.
2833 AllocationSpace retry_space =
2834 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002835 Object* result;
2836 { MaybeObject* maybe_result =
2837 AllocateRaw(map->instance_size(), space, retry_space);
2838 if (!maybe_result->ToObject(&result)) return maybe_result;
2839 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002840 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002841#ifdef ENABLE_LOGGING_AND_PROFILING
2842 ProducerHeapProfile::RecordJSObjectAllocation(result);
2843#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002844 return result;
2845}
2846
2847
John Reck59135872010-11-02 12:39:01 -07002848MaybeObject* Heap::InitializeFunction(JSFunction* function,
2849 SharedFunctionInfo* shared,
2850 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002851 ASSERT(!prototype->IsMap());
2852 function->initialize_properties();
2853 function->initialize_elements();
2854 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002855 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002856 function->set_prototype_or_initial_map(prototype);
2857 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002858 function->set_literals(empty_fixed_array());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002859 function->set_next_function_link(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002860 return function;
2861}
2862
2863
John Reck59135872010-11-02 12:39:01 -07002864MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002865 // Allocate the prototype. Make sure to use the object function
2866 // from the function's context, since the function can be from a
2867 // different context.
2868 JSFunction* object_function =
2869 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002870 Object* prototype;
2871 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2872 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2873 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002874 // When creating the prototype for the function we must set its
2875 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002876 Object* result;
2877 { MaybeObject* maybe_result =
2878 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
2879 function,
2880 DONT_ENUM);
2881 if (!maybe_result->ToObject(&result)) return maybe_result;
2882 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002883 return prototype;
2884}
2885
2886
John Reck59135872010-11-02 12:39:01 -07002887MaybeObject* Heap::AllocateFunction(Map* function_map,
2888 SharedFunctionInfo* shared,
2889 Object* prototype,
2890 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002891 AllocationSpace space =
2892 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002893 Object* result;
2894 { MaybeObject* maybe_result = Allocate(function_map, space);
2895 if (!maybe_result->ToObject(&result)) return maybe_result;
2896 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002897 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2898}
2899
2900
John Reck59135872010-11-02 12:39:01 -07002901MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002902 // To get fast allocation and map sharing for arguments objects we
2903 // allocate them based on an arguments boilerplate.
2904
2905 // This calls Copy directly rather than using Heap::AllocateRaw so we
2906 // duplicate the check here.
2907 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2908
2909 JSObject* boilerplate =
2910 Top::context()->global_context()->arguments_boilerplate();
2911
Leon Clarkee46be812010-01-19 14:06:41 +00002912 // Check that the size of the boilerplate matches our
2913 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2914 // on the size being a known constant.
2915 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2916
2917 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002918 Object* result;
2919 { MaybeObject* maybe_result =
2920 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
2921 if (!maybe_result->ToObject(&result)) return maybe_result;
2922 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002923
2924 // Copy the content. The arguments boilerplate doesn't have any
2925 // fields that point to new space so it's safe to skip the write
2926 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002927 CopyBlock(HeapObject::cast(result)->address(),
2928 boilerplate->address(),
Leon Clarkee46be812010-01-19 14:06:41 +00002929 kArgumentsObjectSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002930
2931 // Set the two properties.
2932 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2933 callee);
2934 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2935 Smi::FromInt(length),
2936 SKIP_WRITE_BARRIER);
2937
2938 // Check the state of the object
2939 ASSERT(JSObject::cast(result)->HasFastProperties());
2940 ASSERT(JSObject::cast(result)->HasFastElements());
2941
2942 return result;
2943}
2944
2945
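// Returns true if the descriptor array lists the same property name twice.
// The caller sorts the descriptors first, so duplicates must be adjacent and
// a single linear scan suffices.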
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002946static bool HasDuplicates(DescriptorArray* descriptors) {
2947 int count = descriptors->number_of_descriptors();
2948 if (count > 1) {
2949 String* prev_key = descriptors->GetKey(0);
2950 for (int i = 1; i != count; i++) {
2951 String* current_key = descriptors->GetKey(i);
2952 if (prev_key == current_key) return true;
2953 prev_key = current_key;
2954 }
2955 }
2956 return false;
2957}
2958
2959
John Reck59135872010-11-02 12:39:01 -07002960MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002961 ASSERT(!fun->has_initial_map());
2962
2963 // First create a new map with the size and number of in-object properties
2964 // suggested by the function.
2965 int instance_size = fun->shared()->CalculateInstanceSize();
2966 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07002967 Object* map_obj;
2968 { MaybeObject* maybe_map_obj =
2969 Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
2970 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
2971 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002972
2973 // Fetch or allocate prototype.
2974 Object* prototype;
2975 if (fun->has_instance_prototype()) {
2976 prototype = fun->instance_prototype();
2977 } else {
John Reck59135872010-11-02 12:39:01 -07002978 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
2979 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2980 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002981 }
2982 Map* map = Map::cast(map_obj);
2983 map->set_inobject_properties(in_object_properties);
2984 map->set_unused_property_fields(in_object_properties);
2985 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01002986 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00002987
Andrei Popescu402d9372010-02-26 13:31:12 +00002988  // If the function has only simple this-property assignments, add
2989 // field descriptors for these to the initial map as the object
2990 // cannot be constructed without having these properties. Guard by
2991 // the inline_new flag so we only change the map if we generate a
2992 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00002993 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00002994 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002995 int count = fun->shared()->this_property_assignments_count();
2996 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002997      // The inline constructor can only handle in-object properties.
2998 fun->shared()->ForbidInlineConstructor();
2999 } else {
John Reck59135872010-11-02 12:39:01 -07003000 Object* descriptors_obj;
3001 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3002 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3003 return maybe_descriptors_obj;
3004 }
3005 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003006 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3007 for (int i = 0; i < count; i++) {
3008 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3009 ASSERT(name->IsSymbol());
3010 FieldDescriptor field(name, i, NONE);
3011 field.SetEnumerationIndex(i);
3012 descriptors->Set(i, &field);
3013 }
3014 descriptors->SetNextEnumerationIndex(count);
3015 descriptors->SortUnchecked();
3016
3017 // The descriptors may contain duplicates because the compiler does not
3018 // guarantee the uniqueness of property names (it would have required
3019 // quadratic time). Once the descriptors are sorted we can check for
3020 // duplicates in linear time.
3021 if (HasDuplicates(descriptors)) {
3022 fun->shared()->ForbidInlineConstructor();
3023 } else {
3024 map->set_instance_descriptors(descriptors);
3025 map->set_pre_allocated_property_fields(count);
3026 map->set_unused_property_fields(in_object_properties - count);
3027 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003028 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003029 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003030
3031 fun->shared()->StartInobjectSlackTracking(map);
3032
Steve Blocka7e24c12009-10-30 11:49:00 +00003033 return map;
3034}
3035
3036
3037void Heap::InitializeJSObjectFromMap(JSObject* obj,
3038 FixedArray* properties,
3039 Map* map) {
3040 obj->set_properties(properties);
3041 obj->initialize_elements();
3042 // TODO(1240798): Initialize the object's body using valid initial values
3043 // according to the object's initial map. For example, if the map's
3044 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3045  // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
3046  // fixed array (e.g., Heap::empty_fixed_array()). Currently, the object
3047  // verification code has to cope with (temporarily) invalid objects. See,
3048  // for example, JSArray::JSArrayVerify.
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003049 Object* filler;
3050 // We cannot always fill with one_pointer_filler_map because objects
3051 // created from API functions expect their internal fields to be initialized
3052 // with undefined_value.
3053 if (map->constructor()->IsJSFunction() &&
3054 JSFunction::cast(map->constructor())->shared()->
3055 IsInobjectSlackTrackingInProgress()) {
3056 // We might want to shrink the object later.
3057 ASSERT(obj->GetInternalFieldCount() == 0);
3058 filler = Heap::one_pointer_filler_map();
3059 } else {
3060 filler = Heap::undefined_value();
3061 }
3062 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00003063}
3064
3065
John Reck59135872010-11-02 12:39:01 -07003066MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003067 // JSFunctions should be allocated using AllocateFunction to be
3068 // properly initialized.
3069 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3070
Steve Block8defd9f2010-07-08 12:39:36 +01003071 // Both types of global objects should be allocated using
3072 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003073 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3074 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3075
3076 // Allocate the backing storage for the properties.
3077 int prop_size =
3078 map->pre_allocated_property_fields() +
3079 map->unused_property_fields() -
3080 map->inobject_properties();
3081 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003082 Object* properties;
3083 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3084 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3085 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003086
3087 // Allocate the JSObject.
3088 AllocationSpace space =
3089 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3090 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003091 Object* obj;
3092 { MaybeObject* maybe_obj = Allocate(map, space);
3093 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3094 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003095
3096 // Initialize the JSObject.
3097 InitializeJSObjectFromMap(JSObject::cast(obj),
3098 FixedArray::cast(properties),
3099 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003100 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003101 return obj;
3102}
3103
3104
John Reck59135872010-11-02 12:39:01 -07003105MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3106 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003107 // Allocate the initial map if absent.
3108 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003109 Object* initial_map;
3110 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3111 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3112 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003113 constructor->set_initial_map(Map::cast(initial_map));
3114 Map::cast(initial_map)->set_constructor(constructor);
3115 }
3116  // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003117 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003118 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003119#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003120 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003121 Object* non_failure;
3122 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3123#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003124 return result;
3125}
3126
3127
John Reck59135872010-11-02 12:39:01 -07003128MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003129 ASSERT(constructor->has_initial_map());
3130 Map* map = constructor->initial_map();
3131
3132 // Make sure no field properties are described in the initial map.
3133 // This guarantees us that normalizing the properties does not
3134 // require us to change property values to JSGlobalPropertyCells.
3135 ASSERT(map->NextFreePropertyIndex() == 0);
3136
3137 // Make sure we don't have a ton of pre-allocated slots in the
3138 // global objects. They will be unused once we normalize the object.
3139 ASSERT(map->unused_property_fields() == 0);
3140 ASSERT(map->inobject_properties() == 0);
3141
3142  // Initial size of the backing store to avoid resizing the storage during
3143  // bootstrapping. The size differs between the JS global object and the
3144  // builtins object.
3145 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
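  // (Presumably the builtins object needs the larger backing store because it
  // receives far more properties during bootstrapping.)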
3146
3147 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003148 Object* obj;
3149 { MaybeObject* maybe_obj =
3150 StringDictionary::Allocate(
3151 map->NumberOfDescribedProperties() * 2 + initial_size);
3152 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3153 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003154 StringDictionary* dictionary = StringDictionary::cast(obj);
3155
3156 // The global object might be created from an object template with accessors.
3157 // Fill these accessors into the dictionary.
3158 DescriptorArray* descs = map->instance_descriptors();
3159 for (int i = 0; i < descs->number_of_descriptors(); i++) {
3160 PropertyDetails details = descs->GetDetails(i);
3161 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3162 PropertyDetails d =
3163 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3164 Object* value = descs->GetCallbacksObject(i);
John Reck59135872010-11-02 12:39:01 -07003165 { MaybeObject* maybe_value = Heap::AllocateJSGlobalPropertyCell(value);
3166 if (!maybe_value->ToObject(&value)) return maybe_value;
3167 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003168
John Reck59135872010-11-02 12:39:01 -07003169 Object* result;
3170 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3171 if (!maybe_result->ToObject(&result)) return maybe_result;
3172 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003173 dictionary = StringDictionary::cast(result);
3174 }
3175
3176 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003177 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3178 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3179 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003180 JSObject* global = JSObject::cast(obj);
3181 InitializeJSObjectFromMap(global, dictionary, map);
3182
3183 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003184 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3185 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3186 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003187 Map* new_map = Map::cast(obj);
3188
3189  // Set up the global object as a normalized object.
3190 global->set_map(new_map);
3191 global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
3192 global->set_properties(dictionary);
3193
3194 // Make sure result is a global object with properties in dictionary.
3195 ASSERT(global->IsGlobalObject());
3196 ASSERT(!global->HasFastProperties());
3197 return global;
3198}
3199
3200
John Reck59135872010-11-02 12:39:01 -07003201MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003202 // Never used to copy functions. If functions need to be copied we
3203 // have to be careful to clear the literals array.
3204 ASSERT(!source->IsJSFunction());
3205
3206 // Make the clone.
3207 Map* map = source->map();
3208 int object_size = map->instance_size();
3209 Object* clone;
3210
3211 // If we're forced to always allocate, we use the general allocation
3212 // functions which may leave us with an object in old space.
3213 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003214 { MaybeObject* maybe_clone =
3215 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3216 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3217 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003218 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003219 CopyBlock(clone_address,
3220 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003221 object_size);
3222 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003223 RecordWrites(clone_address,
3224 JSObject::kHeaderSize,
3225 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003226 } else {
John Reck59135872010-11-02 12:39:01 -07003227 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3228 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3229 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003230 ASSERT(Heap::InNewSpace(clone));
3231 // Since we know the clone is allocated in new space, we can copy
3232 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003233 CopyBlock(HeapObject::cast(clone)->address(),
3234 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003235 object_size);
3236 }
3237
3238 FixedArray* elements = FixedArray::cast(source->elements());
3239 FixedArray* properties = FixedArray::cast(source->properties());
3240 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003241 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003242 Object* elem;
3243 { MaybeObject* maybe_elem =
3244 (elements->map() == fixed_cow_array_map()) ?
3245 elements : CopyFixedArray(elements);
3246 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3247 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003248 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3249 }
3250 // Update properties if necessary.
3251 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003252 Object* prop;
3253 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3254 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3255 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003256 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3257 }
3258 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003259#ifdef ENABLE_LOGGING_AND_PROFILING
3260 ProducerHeapProfile::RecordJSObjectAllocation(clone);
3261#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003262 return clone;
3263}
3264
3265
John Reck59135872010-11-02 12:39:01 -07003266MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3267 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003268 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003269 Map* map = constructor->initial_map();
3270
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003271 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003272 // objects allocated using the constructor.
3273 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003274 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003275
3276 // Allocate the backing storage for the properties.
3277 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003278 Object* properties;
3279 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3280 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3281 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003282
3283 // Reset the map for the object.
3284 object->set_map(constructor->initial_map());
3285
3286 // Reinitialize the object from the constructor map.
3287 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3288 return object;
3289}
3290
3291
John Reck59135872010-11-02 12:39:01 -07003292MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3293 PretenureFlag pretenure) {
3294 Object* result;
3295 { MaybeObject* maybe_result =
3296 AllocateRawAsciiString(string.length(), pretenure);
3297 if (!maybe_result->ToObject(&result)) return maybe_result;
3298 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003299
3300 // Copy the characters into the new object.
3301 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3302 for (int i = 0; i < string.length(); i++) {
3303 string_result->SeqAsciiStringSet(i, string[i]);
3304 }
3305 return result;
3306}
3307
3308
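// Decodes the UTF-8 input in two passes: the first pass counts the decoded
// characters, the second writes them into a freshly allocated two-byte
// string. Characters outside the Basic Multilingual Plane are replaced with
// unibrow::Utf8::kBadChar.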
Steve Block9fac8402011-05-12 15:51:54 +01003309MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3310 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003311 // V8 only supports characters in the Basic Multilingual Plane.
3312 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003313 // Count the number of characters in the UTF-8 string and check if
3314 // it is an ASCII string.
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003315 Access<ScannerConstants::Utf8Decoder>
3316 decoder(ScannerConstants::utf8_decoder());
Steve Blocka7e24c12009-10-30 11:49:00 +00003317 decoder->Reset(string.start(), string.length());
3318 int chars = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003319 while (decoder->has_more()) {
Steve Block9fac8402011-05-12 15:51:54 +01003320 decoder->GetNext();
Steve Blocka7e24c12009-10-30 11:49:00 +00003321 chars++;
3322 }
3323
John Reck59135872010-11-02 12:39:01 -07003324 Object* result;
3325 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3326 if (!maybe_result->ToObject(&result)) return maybe_result;
3327 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003328
3329 // Convert and copy the characters into the new object.
3330 String* string_result = String::cast(result);
3331 decoder->Reset(string.start(), string.length());
3332 for (int i = 0; i < chars; i++) {
3333 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003334 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003335 string_result->Set(i, r);
3336 }
3337 return result;
3338}
3339
3340
John Reck59135872010-11-02 12:39:01 -07003341MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3342 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003343 // Check if the string is an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003344 MaybeObject* maybe_result;
Steve Block9fac8402011-05-12 15:51:54 +01003345 if (String::IsAscii(string.start(), string.length())) {
John Reck59135872010-11-02 12:39:01 -07003346 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003347 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003348 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003349 }
John Reck59135872010-11-02 12:39:01 -07003350 Object* result;
3351 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003352
3353 // Copy the characters into the new object, which may be either ASCII or
3354 // UTF-16.
3355 String* string_result = String::cast(result);
3356 for (int i = 0; i < string.length(); i++) {
3357 string_result->Set(i, string[i]);
3358 }
3359 return result;
3360}
3361
3362
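// Maps a string map to the corresponding symbol map, or returns NULL when no
// in-place symbolification is possible (for example, for strings in new
// space), in which case the caller presumably has to allocate a new symbol.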
3363Map* Heap::SymbolMapForString(String* string) {
3364 // If the string is in new space it cannot be used as a symbol.
3365 if (InNewSpace(string)) return NULL;
3366
3367 // Find the corresponding symbol map for strings.
3368 Map* map = string->map();
Steve Blockd0582a62009-12-15 09:54:21 +00003369 if (map == ascii_string_map()) return ascii_symbol_map();
3370 if (map == string_map()) return symbol_map();
3371 if (map == cons_string_map()) return cons_symbol_map();
3372 if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
3373 if (map == external_string_map()) return external_symbol_map();
3374 if (map == external_ascii_string_map()) return external_ascii_symbol_map();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003375 if (map == external_string_with_ascii_data_map()) {
3376 return external_symbol_with_ascii_data_map();
3377 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003378
3379 // No match found.
3380 return NULL;
3381}
3382
3383
John Reck59135872010-11-02 12:39:01 -07003384MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3385 int chars,
3386 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003387 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003388 // Ensure the chars matches the number of characters in the buffer.
3389 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3390  // Determine whether the string is ASCII.
3391 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003392 while (buffer->has_more()) {
3393 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3394 is_ascii = false;
3395 break;
3396 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003397 }
3398 buffer->Rewind();
3399
3400 // Compute map and object size.
3401 int size;
3402 Map* map;
3403
3404 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003405 if (chars > SeqAsciiString::kMaxLength) {
3406 return Failure::OutOfMemoryException();
3407 }
Steve Blockd0582a62009-12-15 09:54:21 +00003408 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003409 size = SeqAsciiString::SizeFor(chars);
3410 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003411 if (chars > SeqTwoByteString::kMaxLength) {
3412 return Failure::OutOfMemoryException();
3413 }
Steve Blockd0582a62009-12-15 09:54:21 +00003414 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003415 size = SeqTwoByteString::SizeFor(chars);
3416 }
3417
3418 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003419 Object* result;
3420 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3421 ? lo_space_->AllocateRaw(size)
3422 : old_data_space_->AllocateRaw(size);
3423 if (!maybe_result->ToObject(&result)) return maybe_result;
3424 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003425
3426 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003427 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003428 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003429 answer->set_length(chars);
3430 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003431
3432 ASSERT_EQ(size, answer->Size());
3433
3434 // Fill in the characters.
3435 for (int i = 0; i < chars; i++) {
3436 answer->Set(i, buffer->GetNext());
3437 }
3438 return answer;
3439}
3440
3441
John Reck59135872010-11-02 12:39:01 -07003442MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003443 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3444 return Failure::OutOfMemoryException();
3445 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003446
3447 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003448 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003449
Leon Clarkee46be812010-01-19 14:06:41 +00003450 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3451 AllocationSpace retry_space = OLD_DATA_SPACE;
3452
Steve Blocka7e24c12009-10-30 11:49:00 +00003453 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003454 if (size > kMaxObjectSizeInNewSpace) {
3455 // Allocate in large object space, retry space will be ignored.
3456 space = LO_SPACE;
3457 } else if (size > MaxObjectSizeInPagedSpace()) {
3458 // Allocate in new space, retry in large object space.
3459 retry_space = LO_SPACE;
3460 }
3461 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3462 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003463 }
John Reck59135872010-11-02 12:39:01 -07003464 Object* result;
3465 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3466 if (!maybe_result->ToObject(&result)) return maybe_result;
3467 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003468
Steve Blocka7e24c12009-10-30 11:49:00 +00003469 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003470 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003471 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003472 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003473 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3474 return result;
3475}
3476
3477
John Reck59135872010-11-02 12:39:01 -07003478MaybeObject* Heap::AllocateRawTwoByteString(int length,
3479 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003480 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3481 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003482 }
Leon Clarkee46be812010-01-19 14:06:41 +00003483 int size = SeqTwoByteString::SizeFor(length);
3484 ASSERT(size <= SeqTwoByteString::kMaxSize);
3485 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3486 AllocationSpace retry_space = OLD_DATA_SPACE;
3487
3488 if (space == NEW_SPACE) {
3489 if (size > kMaxObjectSizeInNewSpace) {
3490 // Allocate in large object space, retry space will be ignored.
3491 space = LO_SPACE;
3492 } else if (size > MaxObjectSizeInPagedSpace()) {
3493 // Allocate in new space, retry in large object space.
3494 retry_space = LO_SPACE;
3495 }
3496 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3497 space = LO_SPACE;
3498 }
John Reck59135872010-11-02 12:39:01 -07003499 Object* result;
3500 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3501 if (!maybe_result->ToObject(&result)) return maybe_result;
3502 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003503
Steve Blocka7e24c12009-10-30 11:49:00 +00003504 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003505 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003506 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003507 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003508 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3509 return result;
3510}
3511
3512
John Reck59135872010-11-02 12:39:01 -07003513MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003514 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003515 Object* result;
3516 { MaybeObject* maybe_result =
3517 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3518 if (!maybe_result->ToObject(&result)) return maybe_result;
3519 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003520 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003521 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3522 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003523 return result;
3524}
3525
3526
John Reck59135872010-11-02 12:39:01 -07003527MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003528 if (length < 0 || length > FixedArray::kMaxLength) {
3529 return Failure::OutOfMemoryException();
3530 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003531 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003532 // Use the general function if we're forced to always allocate.
3533 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3534 // Allocate the raw data for a fixed array.
3535 int size = FixedArray::SizeFor(length);
3536 return size <= kMaxObjectSizeInNewSpace
3537 ? new_space_.AllocateRaw(size)
3538 : lo_space_->AllocateRawFixedArray(size);
3539}
3540
3541
John Reck59135872010-11-02 12:39:01 -07003542MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003543 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003544 Object* obj;
3545 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3546 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3547 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003548 if (Heap::InNewSpace(obj)) {
3549 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003550 dst->set_map(map);
3551 CopyBlock(dst->address() + kPointerSize,
3552 src->address() + kPointerSize,
3553 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003554 return obj;
3555 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003556 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003557 FixedArray* result = FixedArray::cast(obj);
3558 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003559
Steve Blocka7e24c12009-10-30 11:49:00 +00003560 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003561 AssertNoAllocation no_gc;
3562 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003563 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3564 return result;
3565}
3566
3567
John Reck59135872010-11-02 12:39:01 -07003568MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003569 ASSERT(length >= 0);
3570 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003571 Object* result;
3572 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3573 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003574 }
John Reck59135872010-11-02 12:39:01 -07003575 // Initialize header.
3576 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3577 array->set_map(fixed_array_map());
3578 array->set_length(length);
3579 // Initialize body.
3580 ASSERT(!Heap::InNewSpace(undefined_value()));
3581 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003582 return result;
3583}
3584
3585
John Reck59135872010-11-02 12:39:01 -07003586MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003587 if (length < 0 || length > FixedArray::kMaxLength) {
3588 return Failure::OutOfMemoryException();
3589 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003590
Leon Clarkee46be812010-01-19 14:06:41 +00003591 AllocationSpace space =
3592 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003593 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003594 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3595 // Too big for new space.
3596 space = LO_SPACE;
3597 } else if (space == OLD_POINTER_SPACE &&
3598 size > MaxObjectSizeInPagedSpace()) {
3599 // Too big for old pointer space.
3600 space = LO_SPACE;
3601 }
3602
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003603 AllocationSpace retry_space =
3604 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3605
3606 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003607}
3608
3609
John Reck59135872010-11-02 12:39:01 -07003610MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
3611 int length,
3612 PretenureFlag pretenure,
3613 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003614 ASSERT(length >= 0);
3615 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
3616 if (length == 0) return Heap::empty_fixed_array();
3617
3618 ASSERT(!Heap::InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003619 Object* result;
3620 { MaybeObject* maybe_result = Heap::AllocateRawFixedArray(length, pretenure);
3621 if (!maybe_result->ToObject(&result)) return maybe_result;
3622 }
Steve Block6ded16b2010-05-10 14:33:55 +01003623
3624 HeapObject::cast(result)->set_map(Heap::fixed_array_map());
3625 FixedArray* array = FixedArray::cast(result);
3626 array->set_length(length);
3627 MemsetPointer(array->data_start(), filler, length);
3628 return array;
3629}
3630
3631
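// The two allocators below are thin wrappers around
// AllocateFixedArrayWithFiller and differ only in the filler value:
// undefined_value() for plain fixed arrays, the_hole_value() for arrays
// allocated "with holes".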
John Reck59135872010-11-02 12:39:01 -07003632MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003633 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3634}
3635
3636
John Reck59135872010-11-02 12:39:01 -07003637MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3638 PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003639 return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
3640}
3641
3642
John Reck59135872010-11-02 12:39:01 -07003643MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003644 if (length == 0) return empty_fixed_array();
3645
John Reck59135872010-11-02 12:39:01 -07003646 Object* obj;
3647 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3648 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3649 }
Steve Block6ded16b2010-05-10 14:33:55 +01003650
3651 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3652 FixedArray::cast(obj)->set_length(length);
3653 return obj;
3654}
3655
3656
John Reck59135872010-11-02 12:39:01 -07003657MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3658 Object* result;
3659 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length, pretenure);
3660 if (!maybe_result->ToObject(&result)) return maybe_result;
3661 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003662 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003663 ASSERT(result->IsHashTable());
3664 return result;
3665}
3666
3667
John Reck59135872010-11-02 12:39:01 -07003668MaybeObject* Heap::AllocateGlobalContext() {
3669 Object* result;
3670 { MaybeObject* maybe_result =
3671 Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3672 if (!maybe_result->ToObject(&result)) return maybe_result;
3673 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003674 Context* context = reinterpret_cast<Context*>(result);
3675 context->set_map(global_context_map());
3676 ASSERT(context->IsGlobalContext());
3677 ASSERT(result->IsContext());
3678 return result;
3679}
3680
3681
John Reck59135872010-11-02 12:39:01 -07003682MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003683 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003684 Object* result;
3685 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length);
3686 if (!maybe_result->ToObject(&result)) return maybe_result;
3687 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003688 Context* context = reinterpret_cast<Context*>(result);
3689 context->set_map(context_map());
3690 context->set_closure(function);
3691 context->set_fcontext(context);
3692 context->set_previous(NULL);
3693 context->set_extension(NULL);
3694 context->set_global(function->context()->global());
3695 ASSERT(!context->IsGlobalContext());
3696 ASSERT(context->is_function_context());
3697 ASSERT(result->IsContext());
3698 return result;
3699}
3700
3701
John Reck59135872010-11-02 12:39:01 -07003702MaybeObject* Heap::AllocateWithContext(Context* previous,
3703 JSObject* extension,
3704 bool is_catch_context) {
3705 Object* result;
3706 { MaybeObject* maybe_result =
3707 Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3708 if (!maybe_result->ToObject(&result)) return maybe_result;
3709 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003710 Context* context = reinterpret_cast<Context*>(result);
3711 context->set_map(is_catch_context ? catch_context_map() : context_map());
3712 context->set_closure(previous->closure());
3713 context->set_fcontext(previous->fcontext());
3714 context->set_previous(previous);
3715 context->set_extension(extension);
3716 context->set_global(previous->global());
3717 ASSERT(!context->IsGlobalContext());
3718 ASSERT(!context->is_function_context());
3719 ASSERT(result->IsContext());
3720 return result;
3721}
3722
3723
John Reck59135872010-11-02 12:39:01 -07003724MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003725 Map* map;
3726 switch (type) {
3727#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3728STRUCT_LIST(MAKE_CASE)
3729#undef MAKE_CASE
3730 default:
3731 UNREACHABLE();
3732 return Failure::InternalError();
3733 }
3734 int size = map->instance_size();
3735 AllocationSpace space =
3736 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003737 Object* result;
3738 { MaybeObject* maybe_result = Heap::Allocate(map, space);
3739 if (!maybe_result->ToObject(&result)) return maybe_result;
3740 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003741 Struct::cast(result)->InitializeBody(size);
3742 return result;
3743}
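// For reference, each MAKE_CASE expansion above has the shape
//   case SCRIPT_TYPE: map = script_map(); break;
// (Script is used here purely as an illustration of one (NAME, Name, name)
// triple; the actual set of cases is whatever STRUCT_LIST defines.)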
3744
3745
3746bool Heap::IdleNotification() {
3747 static const int kIdlesBeforeScavenge = 4;
3748 static const int kIdlesBeforeMarkSweep = 7;
3749 static const int kIdlesBeforeMarkCompact = 8;
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003750 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
3751 static const int kGCsBetweenCleanup = 4;
Steve Blocka7e24c12009-10-30 11:49:00 +00003752 static int number_idle_notifications = 0;
3753 static int last_gc_count = gc_count_;
3754
Steve Block6ded16b2010-05-10 14:33:55 +01003755 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003756 bool finished = false;
3757
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003758 // Reset the number of idle notifications received when a number of
3759 // GCs have taken place. This allows another round of cleanup based
3760 // on idle notifications if enough work has been carried out to
3761 // provoke a number of garbage collections.
3762 if (gc_count_ < last_gc_count + kGCsBetweenCleanup) {
3763 number_idle_notifications =
3764 Min(number_idle_notifications + 1, kMaxIdleCount);
Steve Blocka7e24c12009-10-30 11:49:00 +00003765 } else {
3766 number_idle_notifications = 0;
3767 last_gc_count = gc_count_;
3768 }
3769
3770 if (number_idle_notifications == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003771 if (contexts_disposed_ > 0) {
3772 HistogramTimerScope scope(&Counters::gc_context);
3773 CollectAllGarbage(false);
3774 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003775 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003776 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003777 new_space_.Shrink();
3778 last_gc_count = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003779 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003780 // Before doing the mark-sweep collections we clear the
3781 // compilation cache to avoid hanging on to source code and
3782 // generated code for cached functions.
3783 CompilationCache::Clear();
3784
Steve Blocka7e24c12009-10-30 11:49:00 +00003785 CollectAllGarbage(false);
3786 new_space_.Shrink();
3787 last_gc_count = gc_count_;
3788
3789 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3790 CollectAllGarbage(true);
3791 new_space_.Shrink();
3792 last_gc_count = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003793 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003794
3795 } else if (contexts_disposed_ > 0) {
3796 if (FLAG_expose_gc) {
3797 contexts_disposed_ = 0;
3798 } else {
3799 HistogramTimerScope scope(&Counters::gc_context);
3800 CollectAllGarbage(false);
3801 last_gc_count = gc_count_;
3802 }
3803 // If this is the first idle notification, we reset the
3804 // notification count to avoid letting idle notifications for
3805 // context disposal garbage collections start a potentially too
3806 // aggressive idle GC cycle.
3807 if (number_idle_notifications <= 1) {
3808 number_idle_notifications = 0;
3809 uncommit = false;
3810 }
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003811 } else if (number_idle_notifications > kIdlesBeforeMarkCompact) {
3812 // If we have received more than kIdlesBeforeMarkCompact idle
3813 // notifications we do not perform any cleanup because we don't
3814 // expect to gain much by doing so.
3815 finished = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003816 }
3817
Steve Block6ded16b2010-05-10 14:33:55 +01003818 // Make sure that we have no pending context disposals and
3819 // conditionally uncommit from space.
3820 ASSERT(contexts_disposed_ == 0);
3821 if (uncommit) Heap::UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003822 return finished;
3823}
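// Rough timeline of the heuristic above, assuming the embedder keeps sending
// idle notifications, no contexts are disposed along the way, and no other
// garbage collections reset the counter: notifications 1-3 do nothing, the
// 4th triggers a new-space scavenge, the 7th clears the compilation cache and
// runs a full mark-sweep, and the 8th runs a compacting collection and
// reports the idle round as finished; further notifications are ignored.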
3824
3825
3826#ifdef DEBUG
3827
3828void Heap::Print() {
3829 if (!HasBeenSetup()) return;
3830 Top::PrintStack();
3831 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003832 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3833 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003834}
3835
3836
3837void Heap::ReportCodeStatistics(const char* title) {
3838 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3839 PagedSpace::ResetCodeStatistics();
3840 // We do not look for code in new space, map space, or old space. If code
3841 // somehow ends up in those spaces, we would miss it here.
3842 code_space_->CollectCodeStatistics();
3843 lo_space_->CollectCodeStatistics();
3844 PagedSpace::ReportCodeStatistics();
3845}
3846
3847
3848// This function expects that NewSpace's allocated objects histogram is
3849// populated (via a call to CollectStatistics or else as a side effect of a
3850// just-completed scavenge collection).
3851void Heap::ReportHeapStatistics(const char* title) {
3852 USE(title);
3853 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3854 title, gc_count_);
3855 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003856 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3857 old_gen_promotion_limit_);
3858 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3859 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003860
3861 PrintF("\n");
3862 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3863 GlobalHandles::PrintStats();
3864 PrintF("\n");
3865
3866 PrintF("Heap statistics : ");
3867 MemoryAllocator::ReportStatistics();
3868 PrintF("To space : ");
3869 new_space_.ReportStatistics();
3870 PrintF("Old pointer space : ");
3871 old_pointer_space_->ReportStatistics();
3872 PrintF("Old data space : ");
3873 old_data_space_->ReportStatistics();
3874 PrintF("Code space : ");
3875 code_space_->ReportStatistics();
3876 PrintF("Map space : ");
3877 map_space_->ReportStatistics();
3878 PrintF("Cell space : ");
3879 cell_space_->ReportStatistics();
3880 PrintF("Large object space : ");
3881 lo_space_->ReportStatistics();
3882 PrintF(">>>>>> ========================================= >>>>>>\n");
3883}
3884
3885#endif // DEBUG
3886
3887bool Heap::Contains(HeapObject* value) {
3888 return Contains(value->address());
3889}
3890
3891
3892bool Heap::Contains(Address addr) {
3893 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3894 return HasBeenSetup() &&
3895 (new_space_.ToSpaceContains(addr) ||
3896 old_pointer_space_->Contains(addr) ||
3897 old_data_space_->Contains(addr) ||
3898 code_space_->Contains(addr) ||
3899 map_space_->Contains(addr) ||
3900 cell_space_->Contains(addr) ||
3901 lo_space_->SlowContains(addr));
3902}
3903
3904
3905bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3906 return InSpace(value->address(), space);
3907}
3908
3909
3910bool Heap::InSpace(Address addr, AllocationSpace space) {
3911 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3912 if (!HasBeenSetup()) return false;
3913
3914 switch (space) {
3915 case NEW_SPACE:
3916 return new_space_.ToSpaceContains(addr);
3917 case OLD_POINTER_SPACE:
3918 return old_pointer_space_->Contains(addr);
3919 case OLD_DATA_SPACE:
3920 return old_data_space_->Contains(addr);
3921 case CODE_SPACE:
3922 return code_space_->Contains(addr);
3923 case MAP_SPACE:
3924 return map_space_->Contains(addr);
3925 case CELL_SPACE:
3926 return cell_space_->Contains(addr);
3927 case LO_SPACE:
3928 return lo_space_->SlowContains(addr);
3929 }
3930
3931 return false;
3932}
3933
3934
3935#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003936static void DummyScavengePointer(HeapObject** p) {
3937}
3938
3939
3940static void VerifyPointersUnderWatermark(
3941 PagedSpace* space,
3942 DirtyRegionCallback visit_dirty_region) {
3943 PageIterator it(space, PageIterator::PAGES_IN_USE);
3944
3945 while (it.has_next()) {
3946 Page* page = it.next();
3947 Address start = page->ObjectAreaStart();
3948 Address end = page->AllocationWatermark();
3949
3950 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
3951 start,
3952 end,
3953 visit_dirty_region,
3954 &DummyScavengePointer);
3955 }
3956}
3957
3958
3959static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3960 LargeObjectIterator it(space);
3961 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3962 if (object->IsFixedArray()) {
3963 Address slot_address = object->address();
3964 Address end = object->address() + object->Size();
3965
3966 while (slot_address < end) {
3967 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
 3968 // When we are not in a GC, the Heap::InNewSpace() predicate
 3969 // checks that any pointer which satisfies it points into
 3970 // the active semispace.
3971 Heap::InNewSpace(*slot);
3972 slot_address += kPointerSize;
3973 }
3974 }
3975 }
3976}
3977
3978
Steve Blocka7e24c12009-10-30 11:49:00 +00003979void Heap::Verify() {
3980 ASSERT(HasBeenSetup());
3981
3982 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00003983 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00003984
3985 new_space_.Verify();
3986
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003987 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
3988 old_pointer_space_->Verify(&dirty_regions_visitor);
3989 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003990
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003991 VerifyPointersUnderWatermark(old_pointer_space_,
3992 &IteratePointersInDirtyRegion);
3993 VerifyPointersUnderWatermark(map_space_,
3994 &IteratePointersInDirtyMapsRegion);
3995 VerifyPointersUnderWatermark(lo_space_);
3996
3997 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
3998 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
3999
4000 VerifyPointersVisitor no_dirty_regions_visitor;
4001 old_data_space_->Verify(&no_dirty_regions_visitor);
4002 code_space_->Verify(&no_dirty_regions_visitor);
4003 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004004
4005 lo_space_->Verify();
4006}
4007#endif // DEBUG
4008
4009
John Reck59135872010-11-02 12:39:01 -07004010MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004011 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004012 Object* new_table;
4013 { MaybeObject* maybe_new_table =
4014 symbol_table()->LookupSymbol(string, &symbol);
4015 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4016 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004017 // Can't use set_symbol_table because SymbolTable::cast knows that
4018 // SymbolTable is a singleton and checks for identity.
4019 roots_[kSymbolTableRootIndex] = new_table;
4020 ASSERT(symbol != NULL);
4021 return symbol;
4022}
4023
4024
Steve Block9fac8402011-05-12 15:51:54 +01004025MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4026 Object* symbol = NULL;
4027 Object* new_table;
4028 { MaybeObject* maybe_new_table =
4029 symbol_table()->LookupAsciiSymbol(string, &symbol);
4030 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4031 }
4032 // Can't use set_symbol_table because SymbolTable::cast knows that
4033 // SymbolTable is a singleton and checks for identity.
4034 roots_[kSymbolTableRootIndex] = new_table;
4035 ASSERT(symbol != NULL);
4036 return symbol;
4037}
4038
4039
4040MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4041 Object* symbol = NULL;
4042 Object* new_table;
4043 { MaybeObject* maybe_new_table =
4044 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4045 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4046 }
4047 // Can't use set_symbol_table because SymbolTable::cast knows that
4048 // SymbolTable is a singleton and checks for identity.
4049 roots_[kSymbolTableRootIndex] = new_table;
4050 ASSERT(symbol != NULL);
4051 return symbol;
4052}
4053
4054
John Reck59135872010-11-02 12:39:01 -07004055MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004056 if (string->IsSymbol()) return string;
4057 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004058 Object* new_table;
4059 { MaybeObject* maybe_new_table =
4060 symbol_table()->LookupString(string, &symbol);
4061 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4062 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004063 // Can't use set_symbol_table because SymbolTable::cast knows that
4064 // SymbolTable is a singleton and checks for identity.
4065 roots_[kSymbolTableRootIndex] = new_table;
4066 ASSERT(symbol != NULL);
4067 return symbol;
4068}
4069
4070
4071bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4072 if (string->IsSymbol()) {
4073 *symbol = string;
4074 return true;
4075 }
4076 return symbol_table()->LookupSymbolIfExists(string, symbol);
4077}
4078
4079
4080#ifdef DEBUG
4081void Heap::ZapFromSpace() {
4082 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
4083 for (Address a = new_space_.FromSpaceLow();
4084 a < new_space_.FromSpaceHigh();
4085 a += kPointerSize) {
4086 Memory::Address_at(a) = kFromSpaceZapValue;
4087 }
4088}
4089#endif // DEBUG
4090
4091
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004092bool Heap::IteratePointersInDirtyRegion(Address start,
4093 Address end,
4094 ObjectSlotCallback copy_object_func) {
4095 Address slot_address = start;
4096 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004097
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004098 while (slot_address < end) {
4099 Object** slot = reinterpret_cast<Object**>(slot_address);
4100 if (Heap::InNewSpace(*slot)) {
4101 ASSERT((*slot)->IsHeapObject());
4102 copy_object_func(reinterpret_cast<HeapObject**>(slot));
4103 if (Heap::InNewSpace(*slot)) {
4104 ASSERT((*slot)->IsHeapObject());
4105 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004106 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004107 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004108 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004109 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004110 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004111}
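// The boolean result above tells the caller whether the visited region still
// holds pointers into new space after the callback has run.
// IterateDirtyRegions() further down uses it to decide whether a region's
// dirty bit must stay set for the next scavenge.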
4112
4113
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004114// Compute start address of the first map following given addr.
4115static inline Address MapStartAlign(Address addr) {
4116 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4117 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4118}
Steve Blocka7e24c12009-10-30 11:49:00 +00004119
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004120
4121// Compute end address of the first map preceding given addr.
4122static inline Address MapEndAlign(Address addr) {
4123 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4124 return page + ((addr - page) / Map::kSize * Map::kSize);
4125}
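// Worked example for the two helpers above, using a purely hypothetical
// Map::kSize of 40 bytes and a page whose object area starts at offset 0:
// for an addr 100 bytes into the page, MapStartAlign() yields offset 120
// ((100 + 39) / 40 * 40) and MapEndAlign() yields offset 80 (100 / 40 * 40).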
4126
4127
4128static bool IteratePointersInDirtyMaps(Address start,
4129 Address end,
4130 ObjectSlotCallback copy_object_func) {
4131 ASSERT(MapStartAlign(start) == start);
4132 ASSERT(MapEndAlign(end) == end);
4133
4134 Address map_address = start;
4135 bool pointers_to_new_space_found = false;
4136
4137 while (map_address < end) {
4138 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
4139 ASSERT(Memory::Object_at(map_address)->IsMap());
4140
4141 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4142 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4143
4144 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
4145 pointer_fields_end,
4146 copy_object_func)) {
4147 pointers_to_new_space_found = true;
4148 }
4149
4150 map_address += Map::kSize;
4151 }
4152
4153 return pointers_to_new_space_found;
4154}
4155
4156
4157bool Heap::IteratePointersInDirtyMapsRegion(
4158 Address start,
4159 Address end,
4160 ObjectSlotCallback copy_object_func) {
4161 Address map_aligned_start = MapStartAlign(start);
4162 Address map_aligned_end = MapEndAlign(end);
4163
4164 bool contains_pointers_to_new_space = false;
4165
4166 if (map_aligned_start != start) {
4167 Address prev_map = map_aligned_start - Map::kSize;
4168 ASSERT(Memory::Object_at(prev_map)->IsMap());
4169
4170 Address pointer_fields_start =
4171 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4172
4173 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004174 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004175
4176 contains_pointers_to_new_space =
4177 IteratePointersInDirtyRegion(pointer_fields_start,
4178 pointer_fields_end,
4179 copy_object_func)
4180 || contains_pointers_to_new_space;
4181 }
4182
4183 contains_pointers_to_new_space =
4184 IteratePointersInDirtyMaps(map_aligned_start,
4185 map_aligned_end,
4186 copy_object_func)
4187 || contains_pointers_to_new_space;
4188
4189 if (map_aligned_end != end) {
4190 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4191
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004192 Address pointer_fields_start =
4193 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004194
4195 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004196 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004197
4198 contains_pointers_to_new_space =
4199 IteratePointersInDirtyRegion(pointer_fields_start,
4200 pointer_fields_end,
4201 copy_object_func)
4202 || contains_pointers_to_new_space;
4203 }
4204
4205 return contains_pointers_to_new_space;
4206}
4207
4208
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004209void Heap::IterateAndMarkPointersToFromSpace(Address start,
4210 Address end,
4211 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004212 Address slot_address = start;
4213 Page* page = Page::FromAddress(start);
4214
4215 uint32_t marks = page->GetRegionMarks();
4216
4217 while (slot_address < end) {
4218 Object** slot = reinterpret_cast<Object**>(slot_address);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004219 if (Heap::InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004220 ASSERT((*slot)->IsHeapObject());
4221 callback(reinterpret_cast<HeapObject**>(slot));
4222 if (Heap::InNewSpace(*slot)) {
4223 ASSERT((*slot)->IsHeapObject());
4224 marks |= page->GetRegionMaskForAddress(slot_address);
4225 }
4226 }
4227 slot_address += kPointerSize;
4228 }
4229
4230 page->SetRegionMarks(marks);
4231}
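// Unlike IteratePointersInDirtyRegion() above, this variant only reacts to
// slots that still point into from-space, and instead of returning a flag it
// updates the page's region marks directly for every slot that remains a
// new-space pointer after the callback has run.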
4232
4233
4234uint32_t Heap::IterateDirtyRegions(
4235 uint32_t marks,
4236 Address area_start,
4237 Address area_end,
4238 DirtyRegionCallback visit_dirty_region,
4239 ObjectSlotCallback copy_object_func) {
4240 uint32_t newmarks = 0;
4241 uint32_t mask = 1;
4242
4243 if (area_start >= area_end) {
4244 return newmarks;
4245 }
4246
4247 Address region_start = area_start;
4248
4249 // area_start does not necessarily coincide with start of the first region.
4250 // Thus to calculate the beginning of the next region we have to align
4251 // area_start by Page::kRegionSize.
4252 Address second_region =
4253 reinterpret_cast<Address>(
4254 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4255 ~Page::kRegionAlignmentMask);
4256
4257 // Next region might be beyond area_end.
4258 Address region_end = Min(second_region, area_end);
4259
4260 if (marks & mask) {
4261 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4262 newmarks |= mask;
4263 }
4264 }
4265 mask <<= 1;
4266
 4267 // Iterate subsequent regions which lie fully inside [area_start, area_end[.
4268 region_start = region_end;
4269 region_end = region_start + Page::kRegionSize;
4270
4271 while (region_end <= area_end) {
4272 if (marks & mask) {
4273 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4274 newmarks |= mask;
4275 }
4276 }
4277
4278 region_start = region_end;
4279 region_end = region_start + Page::kRegionSize;
4280
4281 mask <<= 1;
4282 }
4283
4284 if (region_start != area_end) {
 4285 // A small piece of the area was left unvisited because area_end does not
 4286 // coincide with a region boundary. Check whether the region covering the
 4287 // last part of the area is dirty.
4288 if (marks & mask) {
4289 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
4290 newmarks |= mask;
4291 }
4292 }
4293 }
4294
4295 return newmarks;
4296}
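// Sketch of the bookkeeping above, assuming (purely for illustration) a
// region size of 256 bytes: an area covering bytes [100, 700) of a page is
// visited as the pieces [100, 256), [256, 512) and [512, 700). Each piece is
// scanned only if its bit in 'marks' is set, and that bit is carried over
// into the returned mark word only if the scan still found new-space
// pointers in the piece.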
4297
4298
4299
4300void Heap::IterateDirtyRegions(
4301 PagedSpace* space,
4302 DirtyRegionCallback visit_dirty_region,
4303 ObjectSlotCallback copy_object_func,
4304 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004305
4306 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004307
Steve Blocka7e24c12009-10-30 11:49:00 +00004308 while (it.has_next()) {
4309 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004310 uint32_t marks = page->GetRegionMarks();
4311
4312 if (marks != Page::kAllRegionsCleanMarks) {
4313 Address start = page->ObjectAreaStart();
4314
 4315 // Do not try to visit pointers beyond the page's allocation watermark.
 4316 // The page can contain garbage pointers there.
4317 Address end;
4318
4319 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4320 page->IsWatermarkValid()) {
4321 end = page->AllocationWatermark();
4322 } else {
4323 end = page->CachedAllocationWatermark();
4324 }
4325
4326 ASSERT(space == old_pointer_space_ ||
4327 (space == map_space_ &&
4328 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4329
4330 page->SetRegionMarks(IterateDirtyRegions(marks,
4331 start,
4332 end,
4333 visit_dirty_region,
4334 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004335 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004336
4337 // Mark page watermark as invalid to maintain watermark validity invariant.
4338 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4339 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004340 }
4341}
4342
4343
Steve Blockd0582a62009-12-15 09:54:21 +00004344void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4345 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004346 IterateWeakRoots(v, mode);
4347}
4348
4349
4350void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004351 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004352 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004353 if (mode != VISIT_ALL_IN_SCAVENGE) {
4354 // Scavenge collections have special processing for this.
4355 ExternalStringTable::Iterate(v);
4356 }
4357 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004358}
4359
4360
Steve Blockd0582a62009-12-15 09:54:21 +00004361void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004362 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004363 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004364
Iain Merrick75681382010-08-19 15:07:18 +01004365 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004366 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004367
4368 Bootstrapper::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004369 v->Synchronize("bootstrapper");
Steve Blocka7e24c12009-10-30 11:49:00 +00004370 Top::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004371 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004372 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004373 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004374
4375#ifdef ENABLE_DEBUGGER_SUPPORT
4376 Debug::Iterate(v);
4377#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004378 v->Synchronize("debug");
Steve Blocka7e24c12009-10-30 11:49:00 +00004379 CompilationCache::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004380 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004381
4382 // Iterate over local handles in handle scopes.
4383 HandleScopeImplementer::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004384 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004385
Leon Clarkee46be812010-01-19 14:06:41 +00004386 // Iterate over the builtin code objects and code stubs in the
4387 // heap. Note that it is not necessary to iterate over code objects
4388 // on scavenge collections.
4389 if (mode != VISIT_ALL_IN_SCAVENGE) {
4390 Builtins::IterateBuiltins(v);
4391 }
Steve Blockd0582a62009-12-15 09:54:21 +00004392 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004393
4394 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004395 if (mode == VISIT_ONLY_STRONG) {
4396 GlobalHandles::IterateStrongRoots(v);
4397 } else {
4398 GlobalHandles::IterateAllRoots(v);
4399 }
4400 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004401
4402 // Iterate over pointers being held by inactive threads.
4403 ThreadManager::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004404 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004405
4406 // Iterate over the pointers the Serialization/Deserialization code is
4407 // holding.
4408 // During garbage collection this keeps the partial snapshot cache alive.
4409 // During deserialization of the startup snapshot this creates the partial
4410 // snapshot cache and deserializes the objects it refers to. During
4411 // serialization this does nothing, since the partial snapshot cache is
4412 // empty. However the next thing we do is create the partial snapshot,
4413 // filling up the partial snapshot cache with objects it needs as we go.
4414 SerializerDeserializer::Iterate(v);
4415 // We don't do a v->Synchronize call here, because in debug mode that will
4416 // output a flag to the snapshot. However at this point the serializer and
4417 // deserializer are deliberately a little unsynchronized (see above) so the
4418 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004419}
Steve Blocka7e24c12009-10-30 11:49:00 +00004420
4421
4422// Flag is set when the heap has been configured. The heap can be repeatedly
4423// configured through the API until it is setup.
4424static bool heap_configured = false;
4425
4426// TODO(1236194): Since the heap size is configurable on the command line
4427// and through the API, we should gracefully handle the case that the heap
4428// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004429bool Heap::ConfigureHeap(int max_semispace_size,
4430 int max_old_gen_size,
4431 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004432 if (HasBeenSetup()) return false;
4433
Steve Block3ce2e202009-11-05 08:53:23 +00004434 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4435
4436 if (Snapshot::IsEnabled()) {
4437 // If we are using a snapshot we always reserve the default amount
4438 // of memory for each semispace because code in the snapshot has
4439 // write-barrier code that relies on the size and alignment of new
4440 // space. We therefore cannot use a larger max semispace size
4441 // than the default reserved semispace size.
4442 if (max_semispace_size_ > reserved_semispace_size_) {
4443 max_semispace_size_ = reserved_semispace_size_;
4444 }
4445 } else {
4446 // If we are not using snapshots we reserve space for the actual
4447 // max semispace size.
4448 reserved_semispace_size_ = max_semispace_size_;
4449 }
4450
4451 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004452 if (max_executable_size > 0) {
4453 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4454 }
4455
4456 // The max executable size must be less than or equal to the max old
4457 // generation size.
4458 if (max_executable_size_ > max_old_generation_size_) {
4459 max_executable_size_ = max_old_generation_size_;
4460 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004461
4462 // The new space size must be a power of two to support single-bit testing
4463 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004464 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4465 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
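 // Illustration: a requested max semispace size of, say, 5 MB is rounded up
 // to 8 MB by the two lines above.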
4466 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4467 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004468
4469 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004470 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004471
4472 heap_configured = true;
4473 return true;
4474}
4475
4476
4477bool Heap::ConfigureHeapDefault() {
Russell Brenner90bac252010-11-18 13:33:46 -08004478 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4479 FLAG_max_old_space_size * MB,
4480 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004481}
4482
4483
Ben Murdochbb769b22010-08-11 14:56:33 +01004484void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004485 *stats->start_marker = HeapStats::kStartMarker;
4486 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004487 *stats->new_space_size = new_space_.SizeAsInt();
4488 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004489 *stats->old_pointer_space_size = old_pointer_space_->Size();
4490 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4491 *stats->old_data_space_size = old_data_space_->Size();
4492 *stats->old_data_space_capacity = old_data_space_->Capacity();
4493 *stats->code_space_size = code_space_->Size();
4494 *stats->code_space_capacity = code_space_->Capacity();
4495 *stats->map_space_size = map_space_->Size();
4496 *stats->map_space_capacity = map_space_->Capacity();
4497 *stats->cell_space_size = cell_space_->Size();
4498 *stats->cell_space_capacity = cell_space_->Capacity();
4499 *stats->lo_space_size = lo_space_->Size();
4500 GlobalHandles::RecordStats(stats);
Ben Murdochbb769b22010-08-11 14:56:33 +01004501 *stats->memory_allocator_size = MemoryAllocator::Size();
4502 *stats->memory_allocator_capacity =
4503 MemoryAllocator::Size() + MemoryAllocator::Available();
Iain Merrick75681382010-08-19 15:07:18 +01004504 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004505 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01004506 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01004507 for (HeapObject* obj = iterator.next();
4508 obj != NULL;
4509 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01004510 InstanceType type = obj->map()->instance_type();
4511 ASSERT(0 <= type && type <= LAST_TYPE);
4512 stats->objects_per_type[type]++;
4513 stats->size_per_type[type] += obj->Size();
4514 }
4515 }
Steve Blockd0582a62009-12-15 09:54:21 +00004516}
4517
4518
Ben Murdochf87a2032010-10-22 12:50:53 +01004519intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004520 return old_pointer_space_->Size()
4521 + old_data_space_->Size()
4522 + code_space_->Size()
4523 + map_space_->Size()
4524 + cell_space_->Size()
4525 + lo_space_->Size();
4526}
4527
4528
4529int Heap::PromotedExternalMemorySize() {
4530 if (amount_of_external_allocated_memory_
4531 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4532 return amount_of_external_allocated_memory_
4533 - amount_of_external_allocated_memory_at_last_global_gc_;
4534}
4535
4536
4537bool Heap::Setup(bool create_heap_objects) {
4538 // Initialize heap spaces and initial maps and objects. Whenever something
4539 // goes wrong, just return false. The caller should check the results and
4540 // call Heap::TearDown() to release allocated memory.
4541 //
4542 // If the heap is not yet configured (eg, through the API), configure it.
4543 // Configuration is based on the flags new-space-size (really the semispace
4544 // size) and old-space-size if set or the initial values of semispace_size_
4545 // and old_generation_size_ otherwise.
4546 if (!heap_configured) {
4547 if (!ConfigureHeapDefault()) return false;
4548 }
4549
Iain Merrick75681382010-08-19 15:07:18 +01004550 ScavengingVisitor::Initialize();
4551 NewSpaceScavenger::Initialize();
4552 MarkCompactCollector::Initialize();
4553
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004554 MarkMapPointersAsEncoded(false);
4555
Steve Blocka7e24c12009-10-30 11:49:00 +00004556 // Setup memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004557 // space. The chunk is double the size of the requested reserved
4558 // new space size to ensure that we can find a pair of semispaces that
4559 // are contiguous and aligned to their size.
Russell Brenner90bac252010-11-18 13:33:46 -08004560 if (!MemoryAllocator::Setup(MaxReserved(), MaxExecutableSize())) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004561 void* chunk =
Steve Block3ce2e202009-11-05 08:53:23 +00004562 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004563 if (chunk == NULL) return false;
4564
4565 // Align the pair of semispaces to their size, which must be a power
4566 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004567 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004568 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4569 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4570 return false;
4571 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004572
4573 // Initialize old pointer space.
4574 old_pointer_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004575 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004576 if (old_pointer_space_ == NULL) return false;
4577 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4578
4579 // Initialize old data space.
4580 old_data_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004581 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004582 if (old_data_space_ == NULL) return false;
4583 if (!old_data_space_->Setup(NULL, 0)) return false;
4584
4585 // Initialize the code space, set its maximum capacity to the old
4586 // generation size. It needs executable memory.
4587 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4588 // virtual address space, so that they can call each other with near calls.
4589 if (code_range_size_ > 0) {
4590 if (!CodeRange::Setup(code_range_size_)) {
4591 return false;
4592 }
4593 }
4594
4595 code_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004596 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004597 if (code_space_ == NULL) return false;
4598 if (!code_space_->Setup(NULL, 0)) return false;
4599
4600 // Initialize map space.
Leon Clarkee46be812010-01-19 14:06:41 +00004601 map_space_ = new MapSpace(FLAG_use_big_map_space
4602 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004603 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4604 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004605 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004606 if (map_space_ == NULL) return false;
4607 if (!map_space_->Setup(NULL, 0)) return false;
4608
4609 // Initialize global property cell space.
Steve Block3ce2e202009-11-05 08:53:23 +00004610 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004611 if (cell_space_ == NULL) return false;
4612 if (!cell_space_->Setup(NULL, 0)) return false;
4613
 4614 // The large object space may contain code or data. We set the memory
4615 // to be non-executable here for safety, but this means we need to enable it
4616 // explicitly when allocating large code objects.
4617 lo_space_ = new LargeObjectSpace(LO_SPACE);
4618 if (lo_space_ == NULL) return false;
4619 if (!lo_space_->Setup()) return false;
4620
4621 if (create_heap_objects) {
4622 // Create initial maps.
4623 if (!CreateInitialMaps()) return false;
4624 if (!CreateApiObjects()) return false;
4625
4626 // Create initial objects
4627 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004628
4629 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004630 }
4631
Ben Murdochf87a2032010-10-22 12:50:53 +01004632 LOG(IntPtrTEvent("heap-capacity", Capacity()));
4633 LOG(IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004634
Steve Block3ce2e202009-11-05 08:53:23 +00004635#ifdef ENABLE_LOGGING_AND_PROFILING
4636 // This should be called only after initial objects have been created.
4637 ProducerHeapProfile::Setup();
4638#endif
4639
Steve Blocka7e24c12009-10-30 11:49:00 +00004640 return true;
4641}
4642
4643
Steve Blockd0582a62009-12-15 09:54:21 +00004644void Heap::SetStackLimits() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004645 // On 64 bit machines, pointers are generally out of range of Smis. We write
4646 // something that looks like an out of range Smi to the GC.
4647
Steve Blockd0582a62009-12-15 09:54:21 +00004648 // Set up the special root array entries containing the stack limits.
4649 // These are actually addresses, but the tag makes the GC ignore it.
Steve Blocka7e24c12009-10-30 11:49:00 +00004650 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00004651 reinterpret_cast<Object*>(
4652 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
4653 roots_[kRealStackLimitRootIndex] =
4654 reinterpret_cast<Object*>(
4655 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00004656}
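// The bit-twiddling above clears the low-order tag bits of each limit
// address, so the stored values carry a smi tag; GC root visitors therefore
// treat these two entries as integers rather than as heap pointers.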
4657
4658
4659void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01004660 if (FLAG_print_cumulative_gc_stat) {
4661 PrintF("\n\n");
4662 PrintF("gc_count=%d ", gc_count_);
4663 PrintF("mark_sweep_count=%d ", ms_count_);
4664 PrintF("mark_compact_count=%d ", mc_count_);
4665 PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
4666 PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01004667 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
4668 GCTracer::get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01004669 PrintF("\n\n");
4670 }
4671
Steve Blocka7e24c12009-10-30 11:49:00 +00004672 GlobalHandles::TearDown();
4673
Leon Clarkee46be812010-01-19 14:06:41 +00004674 ExternalStringTable::TearDown();
4675
Steve Blocka7e24c12009-10-30 11:49:00 +00004676 new_space_.TearDown();
4677
4678 if (old_pointer_space_ != NULL) {
4679 old_pointer_space_->TearDown();
4680 delete old_pointer_space_;
4681 old_pointer_space_ = NULL;
4682 }
4683
4684 if (old_data_space_ != NULL) {
4685 old_data_space_->TearDown();
4686 delete old_data_space_;
4687 old_data_space_ = NULL;
4688 }
4689
4690 if (code_space_ != NULL) {
4691 code_space_->TearDown();
4692 delete code_space_;
4693 code_space_ = NULL;
4694 }
4695
4696 if (map_space_ != NULL) {
4697 map_space_->TearDown();
4698 delete map_space_;
4699 map_space_ = NULL;
4700 }
4701
4702 if (cell_space_ != NULL) {
4703 cell_space_->TearDown();
4704 delete cell_space_;
4705 cell_space_ = NULL;
4706 }
4707
4708 if (lo_space_ != NULL) {
4709 lo_space_->TearDown();
4710 delete lo_space_;
4711 lo_space_ = NULL;
4712 }
4713
4714 MemoryAllocator::TearDown();
4715}
4716
4717
4718void Heap::Shrink() {
4719 // Try to shrink all paged spaces.
4720 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004721 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
4722 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00004723}
4724
4725
4726#ifdef ENABLE_HEAP_PROTECTION
4727
4728void Heap::Protect() {
4729 if (HasBeenSetup()) {
4730 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004731 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4732 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004733 }
4734}
4735
4736
4737void Heap::Unprotect() {
4738 if (HasBeenSetup()) {
4739 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004740 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4741 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004742 }
4743}
4744
4745#endif
4746
4747
Steve Block6ded16b2010-05-10 14:33:55 +01004748void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
4749 ASSERT(callback != NULL);
4750 GCPrologueCallbackPair pair(callback, gc_type);
4751 ASSERT(!gc_prologue_callbacks_.Contains(pair));
4752 return gc_prologue_callbacks_.Add(pair);
4753}
4754
4755
4756void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
4757 ASSERT(callback != NULL);
4758 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
4759 if (gc_prologue_callbacks_[i].callback == callback) {
4760 gc_prologue_callbacks_.Remove(i);
4761 return;
4762 }
4763 }
4764 UNREACHABLE();
4765}
4766
4767
4768void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
4769 ASSERT(callback != NULL);
4770 GCEpilogueCallbackPair pair(callback, gc_type);
4771 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
4772 return gc_epilogue_callbacks_.Add(pair);
4773}
4774
4775
4776void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
4777 ASSERT(callback != NULL);
4778 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
4779 if (gc_epilogue_callbacks_[i].callback == callback) {
4780 gc_epilogue_callbacks_.Remove(i);
4781 return;
4782 }
4783 }
4784 UNREACHABLE();
4785}
4786
4787
Steve Blocka7e24c12009-10-30 11:49:00 +00004788#ifdef DEBUG
4789
4790class PrintHandleVisitor: public ObjectVisitor {
4791 public:
4792 void VisitPointers(Object** start, Object** end) {
4793 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01004794 PrintF(" handle %p to %p\n",
4795 reinterpret_cast<void*>(p),
4796 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00004797 }
4798};
4799
4800void Heap::PrintHandles() {
4801 PrintF("Handles:\n");
4802 PrintHandleVisitor v;
4803 HandleScopeImplementer::Iterate(&v);
4804}
4805
4806#endif
4807
4808
4809Space* AllSpaces::next() {
4810 switch (counter_++) {
4811 case NEW_SPACE:
4812 return Heap::new_space();
4813 case OLD_POINTER_SPACE:
4814 return Heap::old_pointer_space();
4815 case OLD_DATA_SPACE:
4816 return Heap::old_data_space();
4817 case CODE_SPACE:
4818 return Heap::code_space();
4819 case MAP_SPACE:
4820 return Heap::map_space();
4821 case CELL_SPACE:
4822 return Heap::cell_space();
4823 case LO_SPACE:
4824 return Heap::lo_space();
4825 default:
4826 return NULL;
4827 }
4828}
4829
4830
4831PagedSpace* PagedSpaces::next() {
4832 switch (counter_++) {
4833 case OLD_POINTER_SPACE:
4834 return Heap::old_pointer_space();
4835 case OLD_DATA_SPACE:
4836 return Heap::old_data_space();
4837 case CODE_SPACE:
4838 return Heap::code_space();
4839 case MAP_SPACE:
4840 return Heap::map_space();
4841 case CELL_SPACE:
4842 return Heap::cell_space();
4843 default:
4844 return NULL;
4845 }
4846}
4847
4848
4849
4850OldSpace* OldSpaces::next() {
4851 switch (counter_++) {
4852 case OLD_POINTER_SPACE:
4853 return Heap::old_pointer_space();
4854 case OLD_DATA_SPACE:
4855 return Heap::old_data_space();
4856 case CODE_SPACE:
4857 return Heap::code_space();
4858 default:
4859 return NULL;
4860 }
4861}
4862
4863
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004864SpaceIterator::SpaceIterator()
4865 : current_space_(FIRST_SPACE),
4866 iterator_(NULL),
4867 size_func_(NULL) {
4868}
4869
4870
4871SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
4872 : current_space_(FIRST_SPACE),
4873 iterator_(NULL),
4874 size_func_(size_func) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004875}
4876
4877
4878SpaceIterator::~SpaceIterator() {
4879 // Delete active iterator if any.
4880 delete iterator_;
4881}
4882
4883
4884bool SpaceIterator::has_next() {
4885 // Iterate until no more spaces.
4886 return current_space_ != LAST_SPACE;
4887}
4888
4889
4890ObjectIterator* SpaceIterator::next() {
4891 if (iterator_ != NULL) {
4892 delete iterator_;
4893 iterator_ = NULL;
4894 // Move to the next space
4895 current_space_++;
4896 if (current_space_ > LAST_SPACE) {
4897 return NULL;
4898 }
4899 }
4900
4901 // Return iterator for the new current space.
4902 return CreateIterator();
4903}
4904
4905
4906// Create an iterator for the space to iterate.
4907ObjectIterator* SpaceIterator::CreateIterator() {
4908 ASSERT(iterator_ == NULL);
4909
4910 switch (current_space_) {
4911 case NEW_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004912 iterator_ = new SemiSpaceIterator(Heap::new_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004913 break;
4914 case OLD_POINTER_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004915 iterator_ = new HeapObjectIterator(Heap::old_pointer_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004916 break;
4917 case OLD_DATA_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004918 iterator_ = new HeapObjectIterator(Heap::old_data_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004919 break;
4920 case CODE_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004921 iterator_ = new HeapObjectIterator(Heap::code_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004922 break;
4923 case MAP_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004924 iterator_ = new HeapObjectIterator(Heap::map_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004925 break;
4926 case CELL_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004927 iterator_ = new HeapObjectIterator(Heap::cell_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004928 break;
4929 case LO_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004930 iterator_ = new LargeObjectIterator(Heap::lo_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004931 break;
4932 }
4933
 4934 // Return the newly allocated iterator.
4935 ASSERT(iterator_ != NULL);
4936 return iterator_;
4937}
4938
4939
Ben Murdochb0fe1622011-05-05 13:52:32 +01004940class HeapObjectsFilter {
4941 public:
4942 virtual ~HeapObjectsFilter() {}
4943 virtual bool SkipObject(HeapObject* object) = 0;
4944};
4945
4946
4947class FreeListNodesFilter : public HeapObjectsFilter {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004948 public:
4949 FreeListNodesFilter() {
4950 MarkFreeListNodes();
4951 }
4952
Ben Murdochb0fe1622011-05-05 13:52:32 +01004953 bool SkipObject(HeapObject* object) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004954 if (object->IsMarked()) {
4955 object->ClearMark();
4956 return true;
4957 } else {
4958 return false;
4959 }
4960 }
4961
4962 private:
4963 void MarkFreeListNodes() {
4964 Heap::old_pointer_space()->MarkFreeListNodes();
4965 Heap::old_data_space()->MarkFreeListNodes();
4966 MarkCodeSpaceFreeListNodes();
4967 Heap::map_space()->MarkFreeListNodes();
4968 Heap::cell_space()->MarkFreeListNodes();
4969 }
4970
4971 void MarkCodeSpaceFreeListNodes() {
4972 // For code space, using FreeListNode::IsFreeListNode is OK.
4973 HeapObjectIterator iter(Heap::code_space());
4974 for (HeapObject* obj = iter.next_object();
4975 obj != NULL;
4976 obj = iter.next_object()) {
4977 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
4978 }
4979 }
4980
4981 AssertNoAllocation no_alloc;
4982};
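// The filter above works in two phases: the constructor marks every
// free-list node in the paged spaces, and SkipObject() then consumes those
// marks during heap iteration, clearing each one as the corresponding node
// is skipped. The AssertNoAllocation member keeps allocation (and thus any
// reshaping of the free lists) from happening while the filter is alive.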
4983
4984
Ben Murdochb0fe1622011-05-05 13:52:32 +01004985class UnreachableObjectsFilter : public HeapObjectsFilter {
4986 public:
4987 UnreachableObjectsFilter() {
4988 MarkUnreachableObjects();
4989 }
4990
4991 bool SkipObject(HeapObject* object) {
4992 if (object->IsMarked()) {
4993 object->ClearMark();
4994 return true;
4995 } else {
4996 return false;
4997 }
4998 }
4999
5000 private:
5001 class UnmarkingVisitor : public ObjectVisitor {
5002 public:
5003 UnmarkingVisitor() : list_(10) {}
5004
5005 void VisitPointers(Object** start, Object** end) {
5006 for (Object** p = start; p < end; p++) {
5007 if (!(*p)->IsHeapObject()) continue;
5008 HeapObject* obj = HeapObject::cast(*p);
5009 if (obj->IsMarked()) {
5010 obj->ClearMark();
5011 list_.Add(obj);
5012 }
5013 }
5014 }
5015
5016 bool can_process() { return !list_.is_empty(); }
5017
5018 void ProcessNext() {
5019 HeapObject* obj = list_.RemoveLast();
5020 obj->Iterate(this);
5021 }
5022
5023 private:
5024 List<HeapObject*> list_;
5025 };
5026
5027 void MarkUnreachableObjects() {
5028 HeapIterator iterator;
5029 for (HeapObject* obj = iterator.next();
5030 obj != NULL;
5031 obj = iterator.next()) {
5032 obj->SetMark();
5033 }
5034 UnmarkingVisitor visitor;
Ben Murdochb8e0da22011-05-16 14:20:40 +01005035 Heap::IterateRoots(&visitor, VISIT_ALL);
Ben Murdochb0fe1622011-05-05 13:52:32 +01005036 while (visitor.can_process())
5037 visitor.ProcessNext();
5038 }
5039
5040 AssertNoAllocation no_alloc;
5041};
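// UnreachableObjectsFilter inverts the usual marking scheme: it first marks
// every object in the heap, then walks all roots with UnmarkingVisitor,
// transitively clearing the mark from everything that is reachable. Objects
// still marked afterwards are unreachable, and SkipObject() skips (and
// unmarks) them during iteration.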
5042
5043
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005044HeapIterator::HeapIterator()
5045 : filtering_(HeapIterator::kNoFiltering),
5046 filter_(NULL) {
5047 Init();
5048}
5049
5050
Ben Murdochb0fe1622011-05-05 13:52:32 +01005051HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005052 : filtering_(filtering),
5053 filter_(NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005054 Init();
5055}
5056
5057
5058HeapIterator::~HeapIterator() {
5059 Shutdown();
5060}
5061
5062
5063void HeapIterator::Init() {
5064 // Start the iteration.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005065 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5066 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
5067 switch (filtering_) {
5068 case kFilterFreeListNodes:
5069 filter_ = new FreeListNodesFilter;
5070 break;
5071 case kFilterUnreachable:
5072 filter_ = new UnreachableObjectsFilter;
5073 break;
5074 default:
5075 break;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005076 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005077 object_iterator_ = space_iterator_->next();
5078}
5079
5080
5081void HeapIterator::Shutdown() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005082#ifdef DEBUG
Ben Murdochb0fe1622011-05-05 13:52:32 +01005083 // Assert that in filtering mode we have iterated through all
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005084 // objects. Otherwise, heap will be left in an inconsistent state.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005085 if (filtering_ != kNoFiltering) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005086 ASSERT(object_iterator_ == NULL);
5087 }
5088#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005089 // Make sure the last iterator is deallocated.
5090 delete space_iterator_;
5091 space_iterator_ = NULL;
5092 object_iterator_ = NULL;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005093 delete filter_;
5094 filter_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005095}
5096
5097
Leon Clarked91b9f72010-01-27 17:25:45 +00005098HeapObject* HeapIterator::next() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005099 if (filter_ == NULL) return NextObject();
5100
5101 HeapObject* obj = NextObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01005102 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005103 return obj;
5104}
5105
5106
5107HeapObject* HeapIterator::NextObject() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005108 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00005109 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005110
Leon Clarked91b9f72010-01-27 17:25:45 +00005111 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005112 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00005113 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005114 } else {
 5115 // Go through the spaces looking for one that has objects.
5116 while (space_iterator_->has_next()) {
5117 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00005118 if (HeapObject* obj = object_iterator_->next_object()) {
5119 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005120 }
5121 }
5122 }
5123 // Done with the last space.
5124 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00005125 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005126}
5127
5128
5129void HeapIterator::reset() {
5130 // Restart the iterator.
5131 Shutdown();
5132 Init();
5133}
5134
5135
5136#ifdef DEBUG
5137
5138static bool search_for_any_global;
5139static Object* search_target;
5140static bool found_target;
5141static List<Object*> object_stack(20);
5142
5143
5144// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
5145static const int kMarkTag = 2;
5146
5147static void MarkObjectRecursively(Object** p);
5148class MarkObjectVisitor : public ObjectVisitor {
5149 public:
5150 void VisitPointers(Object** start, Object** end) {
 5151 // Recursively mark all HeapObject pointers in [start, end).
5152 for (Object** p = start; p < end; p++) {
5153 if ((*p)->IsHeapObject())
5154 MarkObjectRecursively(p);
5155 }
5156 }
5157};
5158
5159static MarkObjectVisitor mark_visitor;
5160
5161static void MarkObjectRecursively(Object** p) {
5162 if (!(*p)->IsHeapObject()) return;
5163
5164 HeapObject* obj = HeapObject::cast(*p);
5165
5166 Object* map = obj->map();
5167
5168 if (!map->IsHeapObject()) return; // visited before
5169
5170 if (found_target) return; // stop if target found
5171 object_stack.Add(obj);
5172 if ((search_for_any_global && obj->IsJSGlobalObject()) ||
5173 (!search_for_any_global && (obj == search_target))) {
5174 found_target = true;
5175 return;
5176 }
5177
5178 // not visited yet
5179 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5180
5181 Address map_addr = map_p->address();
5182
5183 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5184
5185 MarkObjectRecursively(&map);
5186
5187 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
5188 &mark_visitor);
5189
5190 if (!found_target) // don't pop if found the target
5191 object_stack.RemoveLast();
5192}
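

// How the temporary mark used above works (a sketch derived from the code,
// not a comment from the original source): an object is marked by replacing
// its map word with the map's raw address plus kMarkTag (2).  Live heap
// object pointers carry tag 1 and smis carry tag 0, so the marked map word
// fails the map->IsHeapObject() test at the top of MarkObjectRecursively,
// which is how an already visited object is recognized.  The unmarking pass
// below subtracts kMarkTag and restores a properly tagged map pointer via
// HeapObject::FromAddress.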
5193
5194
5195static void UnmarkObjectRecursively(Object** p);
5196class UnmarkObjectVisitor : public ObjectVisitor {
5197 public:
5198 void VisitPointers(Object** start, Object** end) {
5199    // Unmark all HeapObject pointers in [start, end) recursively.
5200 for (Object** p = start; p < end; p++) {
5201 if ((*p)->IsHeapObject())
5202 UnmarkObjectRecursively(p);
5203 }
5204 }
5205};
5206
5207static UnmarkObjectVisitor unmark_visitor;
5208
5209static void UnmarkObjectRecursively(Object** p) {
5210 if (!(*p)->IsHeapObject()) return;
5211
5212 HeapObject* obj = HeapObject::cast(*p);
5213
5214 Object* map = obj->map();
5215
5216 if (map->IsHeapObject()) return; // unmarked already
5217
5218 Address map_addr = reinterpret_cast<Address>(map);
5219
5220 map_addr -= kMarkTag;
5221
5222 ASSERT_TAG_ALIGNED(map_addr);
5223
5224 HeapObject* map_p = HeapObject::FromAddress(map_addr);
5225
5226 obj->set_map(reinterpret_cast<Map*>(map_p));
5227
5228 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
5229
5230 obj->IterateBody(Map::cast(map_p)->instance_type(),
5231 obj->SizeFromMap(Map::cast(map_p)),
5232 &unmark_visitor);
5233}
5234
5235
5236static void MarkRootObjectRecursively(Object** root) {
5237 if (search_for_any_global) {
5238 ASSERT(search_target == NULL);
5239 } else {
5240 ASSERT(search_target->IsHeapObject());
5241 }
5242 found_target = false;
5243 object_stack.Clear();
5244
5245 MarkObjectRecursively(root);
5246 UnmarkObjectRecursively(root);
5247
5248 if (found_target) {
5249 PrintF("=====================================\n");
5250 PrintF("==== Path to object ====\n");
5251 PrintF("=====================================\n\n");
5252
5253 ASSERT(!object_stack.is_empty());
5254 for (int i = 0; i < object_stack.length(); i++) {
5255 if (i > 0) PrintF("\n |\n |\n V\n\n");
5256 Object* obj = object_stack[i];
5257 obj->Print();
5258 }
5259 PrintF("=====================================\n");
5260 }
5261}
5262
5263
5264// Helper class for visiting HeapObjects recursively.
5265class MarkRootVisitor: public ObjectVisitor {
5266 public:
5267 void VisitPointers(Object** start, Object** end) {
5268 // Visit all HeapObject pointers in [start, end)
5269 for (Object** p = start; p < end; p++) {
5270 if ((*p)->IsHeapObject())
5271 MarkRootObjectRecursively(p);
5272 }
5273 }
5274};
5275
5276
5277// Triggers a depth-first traversal of reachable objects from the roots
5278// and prints the path it finds to a specific heap object.
Leon Clarkee46be812010-01-19 14:06:41 +00005279void Heap::TracePathToObject(Object* target) {
5280 search_target = target;
Steve Blocka7e24c12009-10-30 11:49:00 +00005281 search_for_any_global = false;
5282
5283 MarkRootVisitor root_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00005284 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005285}
5286
5287
5288// Triggers a depth-first traversal of reachable objects from roots
5289// and prints the path it finds to any global object. Useful for
5290// determining the source of leaks of global objects.
5291void Heap::TracePathToGlobal() {
5292 search_target = NULL;
5293 search_for_any_global = true;
5294
5295 MarkRootVisitor root_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00005296 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005297}
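
// A usage sketch (debug builds only; the call sites are hypothetical): both
// helpers are meant to be invoked by hand, e.g. from a debugger or from a
// temporary debugging patch, while hunting down a leaked object.
//
//   Heap::TracePathToObject(suspect);  // prints a root-to-|suspect| path
//   Heap::TracePathToGlobal();         // prints a root-to-any-global path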
5298#endif
5299
5300
Ben Murdochf87a2032010-10-22 12:50:53 +01005301static intptr_t CountTotalHolesSize() {
5302 intptr_t holes_size = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +01005303 OldSpaces spaces;
5304 for (OldSpace* space = spaces.next();
5305 space != NULL;
5306 space = spaces.next()) {
5307 holes_size += space->Waste() + space->AvailableFree();
5308 }
5309 return holes_size;
5310}
5311
5312
Steve Blocka7e24c12009-10-30 11:49:00 +00005313GCTracer::GCTracer()
5314 : start_time_(0.0),
Leon Clarkef7060e22010-06-03 12:02:55 +01005315 start_size_(0),
Steve Blocka7e24c12009-10-30 11:49:00 +00005316 gc_count_(0),
5317 full_gc_count_(0),
5318 is_compacting_(false),
Leon Clarkef7060e22010-06-03 12:02:55 +01005319 marked_count_(0),
5320 allocated_since_last_gc_(0),
5321 spent_in_mutator_(0),
5322 promoted_objects_size_(0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005323 // These two fields reflect the state of the previous full collection.
5324 // Set them before they are changed by the collector.
5325 previous_has_compacted_ = MarkCompactCollector::HasCompacted();
5326 previous_marked_count_ = MarkCompactCollector::previous_marked_count();
Leon Clarkef7060e22010-06-03 12:02:55 +01005327 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00005328 start_time_ = OS::TimeCurrentMillis();
Leon Clarkef7060e22010-06-03 12:02:55 +01005329 start_size_ = Heap::SizeOfObjects();
5330
5331 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
5332 scopes_[i] = 0;
5333 }
5334
5335 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
5336
5337 allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;
5338
5339 if (last_gc_end_timestamp_ > 0) {
5340 spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
5341 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005342}
5343
5344
5345GCTracer::~GCTracer() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005346 // Print ONE line iff one of the tracing flags is set.
Leon Clarkef7060e22010-06-03 12:02:55 +01005347 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5348
5349 bool first_gc = (last_gc_end_timestamp_ == 0);
5350
5351 alive_after_last_gc_ = Heap::SizeOfObjects();
5352 last_gc_end_timestamp_ = OS::TimeCurrentMillis();
5353
5354 int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);
5355
5356 // Update cumulative GC statistics if required.
5357 if (FLAG_print_cumulative_gc_stat) {
5358 max_gc_pause_ = Max(max_gc_pause_, time);
5359 max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
5360 if (!first_gc) {
5361 min_in_mutator_ = Min(min_in_mutator_,
5362 static_cast<int>(spent_in_mutator_));
5363 }
5364 }
5365
5366 if (!FLAG_trace_gc_nvp) {
5367 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
5368
5369 PrintF("%s %.1f -> %.1f MB, ",
5370 CollectorString(),
5371 static_cast<double>(start_size_) / MB,
5372 SizeOfHeapObjects());
5373
5374 if (external_time > 0) PrintF("%d / ", external_time);
5375 PrintF("%d ms.\n", time);
5376 } else {
5377 PrintF("pause=%d ", time);
5378 PrintF("mutator=%d ",
5379 static_cast<int>(spent_in_mutator_));
5380
5381 PrintF("gc=");
5382 switch (collector_) {
5383 case SCAVENGER:
5384 PrintF("s");
5385 break;
5386 case MARK_COMPACTOR:
5387 PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
5388 break;
5389 default:
5390 UNREACHABLE();
5391 }
5392 PrintF(" ");
5393
5394 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
5395 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
5396 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
Iain Merrick75681382010-08-19 15:07:18 +01005397 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
Leon Clarkef7060e22010-06-03 12:02:55 +01005398 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
5399
Ben Murdochf87a2032010-10-22 12:50:53 +01005400 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
5401 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
5402 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
5403 in_free_list_or_wasted_before_gc_);
5404 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
Leon Clarkef7060e22010-06-03 12:02:55 +01005405
Ben Murdochf87a2032010-10-22 12:50:53 +01005406 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
5407 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005408
5409 PrintF("\n");
5410 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005411
5412#if defined(ENABLE_LOGGING_AND_PROFILING)
5413 Heap::PrintShortHeapStatistics();
5414#endif
5415}
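

// Illustrative trace lines produced by the destructor above (the numbers are
// made up).  With --trace-gc:
//
//   Scavenge 12.3 -> 8.1 MB, 2 ms.
//   Mark-sweep 64.0 -> 41.5 MB, 3 / 45 ms.
//
// With --trace-gc-nvp a single name=value line is printed per GC instead:
//
//   pause=45 mutator=812 gc=ms external=3 mark=20 sweep=12 sweepns=0 ...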
5416
5417
5418const char* GCTracer::CollectorString() {
5419 switch (collector_) {
5420 case SCAVENGER:
5421 return "Scavenge";
5422 case MARK_COMPACTOR:
5423 return MarkCompactCollector::HasCompacted() ? "Mark-compact"
5424 : "Mark-sweep";
5425 }
5426 return "Unknown GC";
5427}
5428
5429
5430int KeyedLookupCache::Hash(Map* map, String* name) {
5431 // Uses only lower 32 bits if pointers are larger.
5432 uintptr_t addr_hash =
Leon Clarkee46be812010-01-19 14:06:41 +00005433 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
Andrei Popescu402d9372010-02-26 13:31:12 +00005434 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
Steve Blocka7e24c12009-10-30 11:49:00 +00005435}
5436
5437
5438int KeyedLookupCache::Lookup(Map* map, String* name) {
5439 int index = Hash(map, name);
5440 Key& key = keys_[index];
5441 if ((key.map == map) && key.name->Equals(name)) {
5442 return field_offsets_[index];
5443 }
5444 return -1;
5445}
5446
5447
5448void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
5449 String* symbol;
5450 if (Heap::LookupSymbolIfExists(name, &symbol)) {
5451 int index = Hash(map, symbol);
5452 Key& key = keys_[index];
5453 key.map = map;
5454 key.name = symbol;
5455 field_offsets_[index] = field_offset;
5456 }
5457}
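

// A minimal usage sketch (the slow-path code is hypothetical): the cache maps
// a (map, symbol) pair to a property field offset so a keyed load can skip
// the descriptor array search on a hit.
//
//   int offset = KeyedLookupCache::Lookup(receiver_map, name);
//   if (offset == -1) {
//     offset = /* slow path: search the receiver map's descriptors */;
//     KeyedLookupCache::Update(receiver_map, name, offset);
//   }
//
// Note that Update() only stores an entry when |name| already exists as a
// symbol in the symbol table.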
5458
5459
5460void KeyedLookupCache::Clear() {
5461 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5462}
5463
5464
5465KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
5466
5467
5468int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
5469
5470
5471void DescriptorLookupCache::Clear() {
5472 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
5473}
5474
5475
5476DescriptorLookupCache::Key
5477DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
5478
5479int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
5480
5481
5482#ifdef DEBUG
Ben Murdochf87a2032010-10-22 12:50:53 +01005483void Heap::GarbageCollectionGreedyCheck() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005484 ASSERT(FLAG_gc_greedy);
Ben Murdochf87a2032010-10-22 12:50:53 +01005485 if (Bootstrapper::IsActive()) return;
5486 if (disallow_allocation_failure()) return;
5487 CollectGarbage(NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005488}
5489#endif
5490
5491
5492TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
5493 : type_(t) {
5494 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
5495 uint32_t in1 = 0xffffffffu; // generated by the FPU.
5496 for (int i = 0; i < kCacheSize; i++) {
5497 elements_[i].in[0] = in0;
5498 elements_[i].in[1] = in1;
5499 elements_[i].output = NULL;
5500 }
5501}
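

// Why the all-ones pattern works as the "empty slot" marker (an explanatory
// note, not from the original source): read as the two halves of a double,
// 0xffffffff / 0xffffffff is a quiet NaN with every mantissa bit set, while
// the NaNs actually produced by the FPU (e.g. the default quiet NaN
// 0xfff8000000000000) have a different bit pattern, so no genuine input can
// ever compare equal to an uninitialized cache entry.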
5502
5503
5504TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
5505
5506
5507void TranscendentalCache::Clear() {
5508 for (int i = 0; i < kNumberOfCaches; i++) {
5509 if (caches_[i] != NULL) {
5510 delete caches_[i];
5511 caches_[i] = NULL;
5512 }
5513 }
5514}
5515
5516
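// Compacts both string lists in place: entries that have been cleared to the
// null value (presumably by the GC when the underlying string died) are
// dropped, and new-space entries whose strings have been promoted out of new
// space are moved to the old-space list.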
Leon Clarkee46be812010-01-19 14:06:41 +00005517void ExternalStringTable::CleanUp() {
5518 int last = 0;
5519 for (int i = 0; i < new_space_strings_.length(); ++i) {
5520 if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
5521 if (Heap::InNewSpace(new_space_strings_[i])) {
5522 new_space_strings_[last++] = new_space_strings_[i];
5523 } else {
5524 old_space_strings_.Add(new_space_strings_[i]);
5525 }
5526 }
5527 new_space_strings_.Rewind(last);
5528 last = 0;
5529 for (int i = 0; i < old_space_strings_.length(); ++i) {
5530 if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
5531 ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
5532 old_space_strings_[last++] = old_space_strings_[i];
5533 }
5534 old_space_strings_.Rewind(last);
5535 Verify();
5536}
5537
5538
5539void ExternalStringTable::TearDown() {
5540 new_space_strings_.Free();
5541 old_space_strings_.Free();
5542}
5543
5544
5545List<Object*> ExternalStringTable::new_space_strings_;
5546List<Object*> ExternalStringTable::old_space_strings_;
5547
Steve Blocka7e24c12009-10-30 11:49:00 +00005548} } // namespace v8::internal