// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;


static Mutex* gc_initializer_mutex = OS::CreateMutex();


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if 0  // defined(ANDROID)
      reserved_semispace_size_(2*MB),
      max_semispace_size_(2*MB),
      initial_semispace_size_(128*KB),
      max_old_generation_size_(512*MB),
      max_executable_size_(max_old_generation_size_),
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
      reserved_semispace_size_(16*MB),
      max_semispace_size_(16*MB),
      initial_semispace_size_(1*MB),
      max_old_generation_size_(1*GB),
      max_executable_size_(256*MB),
      code_range_size_(512*MB),
#else
      reserved_semispace_size_(8*MB),
      max_semispace_size_(8*MB),
      initial_semispace_size_(512*KB),
      max_old_generation_size_(512*MB),
      max_executable_size_(128*MB),
      code_range_size_(0),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      sweep_generation_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      mc_count_(0),
      ms_count_(0),
      gc_count_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      configured_(false),
      is_safe_to_read_maps_(true) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


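// During a mark-compact GC the map word of a heap object may be overloaded
// with mark and overflow bits, so an object's size has to be computed from a
// cleaned copy of the map word.  The two helpers below do this; which one is
// safe to use depends on whether map pointers are currently encoded.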
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


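// Chooses the collector for the next cycle: a SCAVENGER collection for
// new-space requests while the old generation still has headroom, and a full
// MARK_COMPACTOR collection otherwise.  Each early return below also bumps a
// counter recording why the full collector was chosen.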
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion.  It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers,       used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space,     used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif

  LiveObjectList::GCPrologue();
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  mark_compact_collector_.SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector()->SetForceCompaction(true);

  // A major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC.  Therefore, if we collect aggressively and a weak handle
  // callback has been invoked, we rerun the major GC to release objects
  // which became garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  mark_compact_collector()->SetForceCompaction(false);
}


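// Runs a single collection with the given collector and returns true if the
// weak-handle processing indicates that another collection is likely to free
// more memory (see PerformGarbageCollection); CollectAllAvailableGarbage
// above uses this as its loop condition.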
bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection.  The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this);
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


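// Guarantees that each space can allocate the requested number of bytes
// without an allocation failure, collecting the corresponding space
// repeatedly until every reservation succeeds within a single pass of the
// loop.  (A sketch of the intended use, assuming the snapshot deserializer
// is the caller: it reserves room for everything it is about to
// materialize before touching the heap.)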
void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!old_data_space->ReserveSpace(data_space_size)) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!code_space->ReserveSpace(code_space_size)) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!map_space->ReserveSpace(map_space_size)) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!cell_space->ReserveSpace(cell_space_size)) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!lo_space->ReserveSpace(large_object_size)) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


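// Walks the weak list of global contexts and clears every per-context
// JSFunctionResultCache, except while the bootstrapper is still setting up
// the heap.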
void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

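// Recomputes the young-generation survival rate as a percentage of the
// new-space size at GC start and classifies the trend against the previous
// rate.  For example (approximate): if 1 MB of a 4 MB new space survives,
// survival_rate is 25; a swing beyond kYoungSurvivalRateAllowedDeviation in
// either direction marks the trend DECREASING or INCREASING.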
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}


bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);
    sweep_generation_++;
    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

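    // Reset the old-generation limits relative to the size that survived
    // this collection.  For example (approximate): with old_gen_size at
    // 30 MB, the promotion limit becomes 30 + max(2, 30/3) = 40 MB and the
    // allocation limit becomes 30 + max(8, 30/2) = 45 MB.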
    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  { DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  bool is_compacting = mark_compact_collector_.IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  is_safe_to_read_maps_ = false;
  mark_compact_collector_.CollectGarbage();
  is_safe_to_read_maps_ = true;

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


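// Looks up the code object containing the given address, first in code
// space and then, unchecked, in the large object space.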
Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(HEAP->old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


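// Grows the new space when more bytes have survived scavenges since the
// last expansion than the current capacity can hold, provided the maximum
// capacity has not been reached yet.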
void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  SwitchScavengingVisitorsTableIfProfilingWasEnabled();

  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead.  So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having
  // an invalid watermark.  This guarantees that dirty regions iteration will
  // use a correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue_.Initialize(new_space_.ToSpaceHigh());

  is_safe_to_read_maps_ = false;
  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &Heap::IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge objects reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  isolate_->global_handles()->IdentifyWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateWeakIndependentRoots(&scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();
  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  is_safe_to_read_maps_ = true;

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


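// Compacts the external-string table in place after a scavenge: entries
// whose strings died are dropped, entries whose strings stayed in new space
// are kept, and promoted strings are moved to the old-string list.  Roughly,
// as called from Scavenge above:
//
//   UpdateNewSpaceReferencesInExternalStringTable(
//       &UpdateNewSpaceReferenceInExternalStringTableEntry);
//
// where the callback returns NULL for a dead string and its forwarded
// location otherwise.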
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.Verify();

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


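// Filters the weak, singly-linked list of optimized JSFunctions that hangs
// off a context (linked through next_function_link), keeping only the
// elements the retainer wants to keep, and returns the new list head.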
static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = heap->undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != heap->undefined_value()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == heap->undefined_value()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there is at least one element.
  if (tail != NULL) {
    tail->set_next_function_link(heap->undefined_value());
  }

  return head;
}


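// Applies the retainer to the weak list of global contexts and, for each
// surviving context, to its nested list of optimized functions, rebuilding
// both lists with write barriers skipped via set_unchecked.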
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (candidate != undefined_value()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined_value()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is at least one element.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


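// Drains the two scavenge work queues until both are empty: the region of
// to-space between new_space_front and the allocation top (newly copied
// objects), and the promotion queue (objects moved to the old generation).
// Processing either queue can grow the other, hence the outer do-while.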
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue_.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue_.remove(&target, &size);

      // A promoted object might already have been partially visited
      // during dirty region iteration. Thus we search specifically for
      // pointers into the from-semispace instead of looking for pointers
      // into new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}

enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


typedef void (*ScavengingCallback)(Map* map,
                                   HeapObject** slot,
                                   HeapObject* object);


static Atomic32 scavenging_visitors_table_mode_;
static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;


INLINE(static void DoScavengeObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* obj));


void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}

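// The scavenging visitor is instantiated twice, once with logging and
// profiling support and once without, so the common (non-profiling)
// scavenge pays no per-object cost for the checks in MigrateObject. Which
// instantiation is active is selected at runtime through
// scavenging_visitors_table_ (see InitializeScavengingVisitorsTables and
// SwitchScavengingVisitorsTableIfProfilingWasEnabled below).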
template<LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);

    table_.Register(kVisitGlobalContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Context::kSize>);

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSFunction,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<JSFunction::kSize>);

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
    should_record = should_record || FLAG_log_gc;
#endif
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object. Returns the target object.
  INLINE(static HeapObject* MigrateObject(Heap* heap,
                                          HeapObject* source,
                                          HeapObject* target,
                                          int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
#endif
      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
#if defined(ENABLE_LOGGING_AND_PROFILING)
      Isolate* isolate = heap->isolate();
      if (isolate->logger()->is_logging() ||
          CpuProfiler::is_profiling(isolate)) {
        if (target->IsSharedFunctionInfo()) {
          PROFILE(isolate, SharedFunctionInfoMoveEvent(
              source->address(), target->address()));
        }
      }
#endif
    }

    return target;
  }

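  // Central evacuation routine. Objects that should survive into old
  // generation are promoted there (old data space for pointer-free objects,
  // old pointer space otherwise, or large object space when they exceed the
  // page limit); everything else is copied within new space. If the
  // promotion allocation fails, the code falls through to an infallible
  // new-space allocation, so a scavenge never runs out of room for
  // survivors.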
  template<ObjectContents object_contents, SizeRestriction size_restriction>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    ASSERT((size_restriction != SMALL) ||
           (object_size <= Page::kMaxHeapObjectSize));
    ASSERT(object->Size() == object_size);

    Heap* heap = map->heap();
    if (heap->ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if ((size_restriction != SMALL) &&
          (object_size > Page::kMaxHeapObjectSize)) {
        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
      } else {
        if (object_contents == DATA_OBJECT) {
          maybe_result = heap->old_data_space()->AllocateRaw(object_size);
        } else {
          maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
        }
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);
        *slot = MigrateObject(heap, object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          heap->promotion_queue()->insert(target, object_size);
        }

        heap->tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    Object* result =
        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
    *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
    return;
  }


  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
                                                 slot,
                                                 object,
                                                 object_size);
  }


  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqAsciiString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
    int object_size = SeqAsciiString::cast(object)->
        SeqAsciiStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }

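  // Cons strings whose second part is the empty string can be "shortcut":
  // instead of evacuating the wrapper, the slot is redirected to the first
  // part and the wrapper's map word is overwritten with a forwarding
  // address, so later visits resolve straight to the target.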
  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    ASSERT(IsShortcutCandidate(map->instance_type()));

    if (ConsString::cast(object)->unchecked_second() ==
        map->heap()->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!map->heap()->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
  }

  template<ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template<int object_size>
    static inline void VisitSpecialized(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }

    static inline void Visit(Map* map,
                             HeapObject** slot,
                             HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};


template<LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<logging_and_profiling_mode>::table_;

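// Both template instantiations register their handlers up front; the shared
// dispatch table starts out as a copy of the non-profiling variant, which
// is the common case at startup.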
static void InitializeScavengingVisitorsTables() {
  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
  scavenging_visitors_table_.CopyFrom(
      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
}


void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
    // Table was already updated by some isolate.
    return;
  }

  if (isolate()->logger()->is_logging() ||
      CpuProfiler::is_profiling(isolate()) ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_profiling())) {
    // If one of the isolates is doing a scavenge at this moment it might
    // see this table in an inconsistent state when some of the callbacks
    // point to ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
    // However this does not lead to any bugs as such an isolate does not
    // have profiling enabled and any isolate with profiling enabled is
    // guaranteed to see the table in the consistent state.
    scavenging_visitors_table_.CopyFrom(
        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());

    // We use Release_Store to prevent reordering of this write before writes
    // to the table.
    Release_Store(&scavenging_visitors_table_mode_,
                  LOGGING_AND_PROFILING_ENABLED);
  }
}

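// Slow path of the scavenger's per-pointer handling: called only for
// objects that have not been forwarded yet (the ASSERT below checks that
// the map word still holds a real map, not a forwarding address), so the
// object is copied or promoted via the dispatch table. Already-forwarded
// objects are resolved by the caller before this point.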
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  ASSERT(HEAP->InFromSpace(object));
  MapWord first_word = object->map_word();
  ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  DoScavengeObject(map, p, object);
}

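// Used only during bootstrapping: allocates a map whose descriptor array,
// code cache and prototype cannot be set yet because the objects they
// should point at do not exist. CreateInitialMaps completes these partial
// maps in place once the empty fixed array and null value are available.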
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  return result;
}


MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value());
  map->set_constructor(null_value());
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->init_instance_descriptors();
  map->set_code_cache(empty_fixed_array());
  map->set_prototype_transitions(empty_fixed_array());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));

  // If the map object is aligned fill the padding area with Smi 0 objects.
  if (Map::kPadStart < Map::kSize) {
    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
           0,
           Map::kSize - Map::kPadStart);
  }
  return map;
}


MaybeObject* Heap::AllocateCodeCache() {
  Object* result;
  { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  CodeCache* code_cache = CodeCache::cast(result);
  code_cache->set_default_cache(empty_fixed_array());
  code_cache->set_normal_type_cache(undefined_value());
  return code_cache;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                     \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents)                               \
  {contents, k##name##RootIndex},
  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};

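// Bootstraps the map hierarchy. The first few maps (meta map, fixed array,
// oddball) have to be created with AllocatePartialMap because the objects
// their remaining fields point at (empty fixed array, null value) do not
// exist yet; once those are allocated the partial maps are completed in
// place, and every map after that can use the regular AllocateMap.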
bool Heap::CreateInitialMaps() {
  Object* obj;
  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  { MaybeObject* maybe_obj =
        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_null_value(obj);
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  // Allocate the empty descriptor array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->init_instance_descriptors();
  meta_map()->set_code_cache(empty_fixed_array());
  meta_map()->set_prototype_transitions(empty_fixed_array());

  fixed_array_map()->init_instance_descriptors();
  fixed_array_map()->set_code_cache(empty_fixed_array());
  fixed_array_map()->set_prototype_transitions(empty_fixed_array());

  oddball_map()->init_instance_descriptors();
  oddball_map()->set_code_cache(empty_fixed_array());
  oddball_map()->set_prototype_transitions(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_cow_array_map(Map::cast(obj));
  ASSERT(fixed_array_map() != fixed_cow_array_map());

  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_heap_number_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_foreign_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_byte_array(ByteArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_pixel_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_float_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
                                         JSGlobalPropertyCell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_property_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_one_pointer_filler_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_two_pointer_filler_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_hash_table_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_catch_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Map* global_context_map = Map::cast(obj);
  global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
  set_global_context_map(global_context_map);

  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                                         SharedFunctionInfo::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_shared_function_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
                                         JSMessageObject::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_message_object_map(Map::cast(obj));

  ASSERT(!InNewSpace(empty_fixed_array()));
  return true;
}

MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateHeapNumber(double value) {
  // Use general version, if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, TENURED);

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result;
  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawCell();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(global_property_cell_map());
  JSGlobalPropertyCell::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::CreateOddball(const char* to_string,
                                 Object* to_number,
                                 byte kind) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_neander_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Object* elements;
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
    if (!maybe_elements->ToObject(&elements)) return false;
  }
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}


void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode());
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode());
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope;
  // gcc-4.4 has a problem generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();
}


bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(obj);
  ASSERT(signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_nan_value(obj);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(obj);
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate initial symbol table.
  { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use set_symbol_table() due to asserts.
  roots_[kSymbolTableRootIndex] = obj;

  // Assign the print strings for oddballs after creating the symbol table.
  Object* symbol;
  { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
    if (!maybe_symbol->ToObject(&symbol)) return false;
  }
  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value())->set_to_number(nan_value());

  // Allocate the null_value.
  { MaybeObject* maybe_obj =
        Oddball::cast(null_value())->Initialize("null",
                                                Smi::FromInt(0),
                                                Oddball::kNull);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  { MaybeObject* maybe_obj = CreateOddball("true",
                                           Smi::FromInt(1),
                                           Oddball::kTrue);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_true_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("false",
                                           Smi::FromInt(0),
                                           Oddball::kFalse);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_false_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("hole",
                                           Smi::FromInt(-1),
                                           Oddball::kTheHole);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_the_hole_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                           Smi::FromInt(-4),
                                           Oddball::kArgumentMarker);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_arguments_marker(obj);

  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                           Smi::FromInt(-2),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_no_interceptor_result_sentinel(obj);

  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
                                           Smi::FromInt(-3),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_termination_exception(obj);

  // Allocate the empty string.
  { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_string(String::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
    { MaybeObject* maybe_obj =
          LookupAsciiSymbol(constant_symbol_table[i].contents);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[constant_symbol_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  { MaybeObject* maybe_obj =
        AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  hidden_symbol_ = String::cast(obj);

  // Allocate the foreign for __proto__.
  { MaybeObject* maybe_obj =
        AllocateForeign((Address) &Accessors::ObjectPrototype);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_prototype_accessors(Foreign::cast(obj));

  // Allocate the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_stubs(NumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_monomorphic_cache(NumberDictionary::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
                                                                       obj);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_intrinsic_function_names(StringDictionary::cast(obj));

  if (InitializeNumberStringCache()->IsFailure()) return false;

  // Allocate cache for single character ASCII strings.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate cache for external strings pointing to native source code.
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_natives_source_cache(FixedArray::cast(obj));

  // Handling of script id generation is in FACTORY->NewScript.
  set_last_script_id(undefined_value());

  // Initialize keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize compilation cache.
  isolate_->compilation_cache()->Clear();

  return true;
}


MaybeObject* Heap::InitializeNumberStringCache() {
  // Compute the size of the number string cache from the max newspace
  // semispace size, at one entry per 512 bytes, clamped to [32, 16K] entries:
  // max_semispace_size_ == 512 KB => number_string_cache_size = 1 KB.
  // max_semispace_size_ == 8 MB => number_string_cache_size = 16 KB.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
  Object* obj;
  MaybeObject* maybe_obj =
      AllocateFixedArray(number_string_cache_size * 2, TENURED);
  if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
  return maybe_obj;
}


void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(this, i);
  }
}

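// The number-string cache is a flat FixedArray used as a direct-mapped
// hash table of (number, string) pairs: entry i occupies elements 2*i
// (the key) and 2*i + 1 (the cached string). The helpers below hash a
// double by folding its bit pattern and hash a Smi by its value; lookups
// follow roughly this pattern:
//
//   int mask = (number_string_cache()->length() >> 1) - 1;
//   int entry = double_get_hash(value) & mask;
//   Object* key = number_string_cache()->get(entry * 2);
//
// The masking appears to rely on the cache size being a power of two
// (semispace sizes, from which it is derived, are powers of two).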
static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}


static inline int smi_get_hash(Smi* smi) {
  return smi->value();
}

Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  Object* key = number_string_cache()->get(hash * 2);
  if (key == number) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  }
  return undefined_value();
}


void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
    number_string_cache()->set(hash * 2, Smi::cast(number));
  } else {
    hash = double_get_hash(number->Number()) & mask;
    number_string_cache()->set(hash * 2, number);
  }
  number_string_cache()->set(hash * 2 + 1, string);
}


MaybeObject* Heap::NumberToString(Object* number,
                                  bool check_number_string_cache) {
  isolate_->counters()->number_to_string_runtime()->Increment();
  if (check_number_string_cache) {
    Object* cached = GetNumberStringCache(number);
    if (cached != undefined_value()) {
      return cached;
    }
  }

  char arr[100];
  Vector<char> buffer(arr, ARRAY_SIZE(arr));
  const char* str;
  if (number->IsSmi()) {
    int num = Smi::cast(number)->value();
    str = IntToCString(num, buffer);
  } else {
    double num = HeapNumber::cast(number)->value();
    str = DoubleToCString(num, buffer);
  }

  Object* js_string;
  MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
  if (maybe_js_string->ToObject(&js_string)) {
    SetNumberStringCache(number, String::cast(js_string));
  }
  return maybe_js_string;
}


Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}


Heap::RootListIndex Heap::RootIndexForExternalArrayType(
    ExternalArrayType array_type) {
  switch (array_type) {
    case kExternalByteArray:
      return kExternalByteArrayMapRootIndex;
    case kExternalUnsignedByteArray:
      return kExternalUnsignedByteArrayMapRootIndex;
    case kExternalShortArray:
      return kExternalShortArrayMapRootIndex;
    case kExternalUnsignedShortArray:
      return kExternalUnsignedShortArrayMapRootIndex;
    case kExternalIntArray:
      return kExternalIntArrayMapRootIndex;
    case kExternalUnsignedIntArray:
      return kExternalUnsignedIntArrayMapRootIndex;
    case kExternalFloatArray:
      return kExternalFloatArrayMapRootIndex;
    case kExternalDoubleArray:
      return kExternalDoubleArrayMapRootIndex;
    case kExternalPixelArray:
      return kExternalPixelArrayMapRootIndex;
    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}

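// Returns the canonical heap representation for a double: -0.0 always gets
// a heap number (a Smi cannot represent the sign bit), integral values in
// Smi range become Smis, and everything else is boxed as a heap number.
// For example, 3.0 comes back as Smi::FromInt(3) while 3.5 and -0.0 are
// materialized as heap numbers.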
MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int. Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
  static const DoubleRepresentation minus_zero(-0.0);

  DoubleRepresentation rep(value);
  if (rep.bits == minus_zero.bits) {
    return AllocateHeapNumber(-0.0, pretenure);
  }

  int int_value = FastD2I(value);
  if (value == int_value && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}


MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate foreigns in paged spaces.
  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(foreign_map(), space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Foreign::cast(result)->set_address(address);
  return result;
}


MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
  Object* result;
  { MaybeObject* maybe_result =
        Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
  share->set_name(name);
  Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
  share->set_code(illegal);
  share->set_scope_info(SerializedScopeInfo::Empty());
  Code* construct_stub = isolate_->builtins()->builtin(
      Builtins::kJSConstructStubGeneric);
  share->set_construct_stub(construct_stub);
  share->set_expected_nof_properties(0);
  share->set_length(0);
  share->set_formal_parameter_count(0);
  share->set_instance_class_name(Object_symbol());
  share->set_function_data(undefined_value());
  share->set_script(undefined_value());
  share->set_start_position_and_type(0);
  share->set_debug_info(undefined_value());
  share->set_inferred_name(empty_string());
  share->set_compiler_hints(0);
  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
  share->set_initial_map(undefined_value());
  share->set_this_property_assignments_count(0);
  share->set_this_property_assignments(undefined_value());
  share->set_opt_count(0);
  share->set_num_literals(0);
  share->set_end_position(0);
  share->set_function_token_position(0);
  share->set_es5_native(false);
  return result;
}


MaybeObject* Heap::AllocateJSMessageObject(String* type,
                                           JSArray* arguments,
                                           int start_position,
                                           int end_position,
                                           Object* script,
                                           Object* stack_trace,
                                           Object* stack_frames) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  JSMessageObject* message = JSMessageObject::cast(result);
  message->set_properties(Heap::empty_fixed_array());
  message->set_elements(Heap::empty_fixed_array());
  message->set_type(type);
  message->set_arguments(arguments);
  message->set_start_position(start_position);
  message->set_end_position(end_position);
  message->set_script(script);
  message->set_stack_trace(stack_trace);
  message->set_stack_frames(stack_frames);
  return result;
}


// Returns true for a character in a range. Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  // This makes use of the unsigned wraparound.
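  // For example, with from = '0' (48) and to = '9' (57): character = 52
  // yields 4 <= 9, which is true, while character = 40 wraps around to a
  // value near 2^32, which fails the comparison. One unsigned compare
  // thus checks both range ends at once.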
  return character - from <= to - from;
}


MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
    Heap* heap,
    uint32_t c1,
    uint32_t c2) {
  String* symbol;
  // Numeric strings have a different hash algorithm not known by
  // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
      heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
    return symbol;
  // Now we know the length is 2, so we might as well make use of that fact
  // when building the new string.
  } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) {  // We can do this
    ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1));  // because of this.
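    // (kMaxAsciiCharCodeU + 1 being a power of two means kMaxAsciiCharCodeU
    // is an all-ones bit mask, so c1 | c2 stays within the limit exactly
    // when both characters do; one branch instead of two.)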
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    char* dest = SeqAsciiString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  } else {
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    uc16* dest = SeqTwoByteString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  }
}


MaybeObject* Heap::AllocateConsString(String* first, String* second) {
  int first_length = first->length();
  if (first_length == 0) {
    return second;
  }

  int second_length = second->length();
  if (second_length == 0) {
    return first;
  }

  int length = first_length + second_length;

  // Optimization for 2-byte strings often used as keys in a decompression
  // dictionary. Check whether we already have the string in the symbol
  // table to prevent creation of many unnecessary strings.
  if (length == 2) {
    unsigned c1 = first->Get(0);
    unsigned c2 = second->Get(0);
    return MakeOrFindTwoCharacterString(this, c1, c2);
  }

  bool first_is_ascii = first->IsAsciiRepresentation();
  bool second_is_ascii = second->IsAsciiRepresentation();
  bool is_ascii = first_is_ascii && second_is_ascii;

  // Make sure that an out of memory exception is thrown if the length
  // of the new cons string is too large.
  if (length > String::kMaxLength || length < 0) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  bool is_ascii_data_in_two_byte_string = false;
  if (!is_ascii) {
    // At least one of the strings uses two-byte representation so we
    // can't use the fast case code for short ascii strings below, but
    // we can try to save memory if all chars actually fit in ascii.
    is_ascii_data_in_two_byte_string =
        first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
    if (is_ascii_data_in_two_byte_string) {
      isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
    }
  }

  // If the resulting string is small, make a flat string.
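  // (For short results an eager copy is assumed to be cheaper than
  // allocating a ConsString cell that would likely be flattened later
  // anyway.)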
  if (length < String::kMinNonFlatLength) {
    ASSERT(first->IsFlat());
    ASSERT(second->IsFlat());
    if (is_ascii) {
      Object* result;
      { MaybeObject* maybe_result = AllocateRawAsciiString(length);
        if (!maybe_result->ToObject(&result)) return maybe_result;
      }
      // Copy the characters into the new object.
      char* dest = SeqAsciiString::cast(result)->GetChars();
      // Copy first part.
      const char* src;
      if (first->IsExternalString()) {
        src = ExternalAsciiString::cast(first)->resource()->data();
      } else {
        src = SeqAsciiString::cast(first)->GetChars();
      }
      for (int i = 0; i < first_length; i++) *dest++ = src[i];
      // Copy second part.
      if (second->IsExternalString()) {
        src = ExternalAsciiString::cast(second)->resource()->data();
      } else {
        src = SeqAsciiString::cast(second)->GetChars();
      }
      for (int i = 0; i < second_length; i++) *dest++ = src[i];
      return result;
    } else {
      if (is_ascii_data_in_two_byte_string) {
        Object* result;
        { MaybeObject* maybe_result = AllocateRawAsciiString(length);
          if (!maybe_result->ToObject(&result)) return maybe_result;
        }
        // Copy the characters into the new object.
        char* dest = SeqAsciiString::cast(result)->GetChars();
        String::WriteToFlat(first, dest, 0, first_length);
        String::WriteToFlat(second, dest + first_length, 0, second_length);
        isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
        return result;
      }

      Object* result;
      { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
        if (!maybe_result->ToObject(&result)) return maybe_result;
      }
      // Copy the characters into the new object.
      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
      String::WriteToFlat(first, dest, 0, first_length);
      String::WriteToFlat(second, dest + first_length, 0, second_length);
      return result;
    }
  }

  Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
      cons_ascii_string_map() : cons_string_map();

  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  AssertNoAllocation no_gc;
  ConsString* cons_string = ConsString::cast(result);
  WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
  cons_string->set_length(length);
  cons_string->set_hash_field(String::kEmptyHashField);
  cons_string->set_first(first, mode);
  cons_string->set_second(second, mode);
  return result;
}


MaybeObject* Heap::AllocateSubString(String* buffer,
                                     int start,
                                     int end,
                                     PretenureFlag pretenure) {
  int length = end - start;

  if (length == 1) {
    return LookupSingleCharacterStringFromCode(buffer->Get(start));
  } else if (length == 2) {
    // Optimization for 2-byte strings often used as keys in a decompression
    // dictionary. Check whether we already have the string in the symbol
    // table to prevent creation of many unnecessary strings.
    unsigned c1 = buffer->Get(start);
    unsigned c2 = buffer->Get(start + 1);
    return MakeOrFindTwoCharacterString(this, c1, c2);
  }

  // Make an attempt to flatten the buffer to reduce access time.
  buffer = buffer->TryFlattenGetString();

  Object* result;
  { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
        ? AllocateRawAsciiString(length, pretenure)
        : AllocateRawTwoByteString(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  String* string_result = String::cast(result);
  // Copy the characters into the new object.
  if (buffer->IsAsciiRepresentation()) {
    ASSERT(string_result->IsAsciiRepresentation());
    char* dest = SeqAsciiString::cast(string_result)->GetChars();
    String::WriteToFlat(buffer, dest, start, end);
  } else {
    ASSERT(string_result->IsTwoByteRepresentation());
    uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
    String::WriteToFlat(buffer, dest, start, end);
  }

  return result;
}


MaybeObject* Heap::AllocateExternalStringFromAscii(
    ExternalAsciiString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  Map* map = external_ascii_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


MaybeObject* Heap::AllocateExternalStringFromTwoByte(
    ExternalTwoByteString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  // For small strings we check whether the resource contains only
  // ASCII characters. If yes, we use a different string map.
  static const size_t kAsciiCheckLengthLimit = 32;
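  // (The limit keeps this O(n) scan bounded; longer ASCII-only resources
  // simply keep the generic two-byte map, which is always correct but
  // forgoes the ASCII fast paths.)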
  bool is_ascii = length <= kAsciiCheckLengthLimit &&
      String::IsAscii(resource->data(), static_cast<int>(length));
  Map* map = is_ascii ?
      external_string_with_ascii_data_map() : external_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


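// LookupSingleCharacterStringFromCode keeps a cache, indexed by character
// code, of the one-character strings for codes up to kMaxAsciiCharCode, so
// repeated lookups of the same code return the cached symbol instead of
// allocating a new string.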
MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
  if (code <= String::kMaxAsciiCharCode) {
    Object* value = single_character_string_cache()->get(code);
    if (value != undefined_value()) return value;

    char buffer[1];
    buffer[0] = static_cast<char>(code);
    Object* result;
    MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));

    if (!maybe_result->ToObject(&result)) return maybe_result;
    single_character_string_cache()->set(code, result);
    return result;
  }

  Object* result;
  { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  String* answer = String::cast(result);
  answer->Set(0, code);
  return answer;
}


MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  if (pretenure == NOT_TENURED) {
    return AllocateByteArray(length);
  }
  int size = ByteArray::SizeFor(length);
  Object* result;
  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
        ? old_data_space_->AllocateRaw(size)
        : lo_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


MaybeObject* Heap::AllocateByteArray(int length) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


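// Overwrites the given address range with a dummy "filler" object so that
// heap iterators and the GC still see a valid object there. One-word and
// two-word holes get dedicated filler maps; anything larger is disguised
// as a ByteArray whose length covers the remaining space.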
void Heap::CreateFillerObjectAt(Address addr, int size) {
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map(one_pointer_filler_map());
  } else if (size == 2 * kPointerSize) {
    filler->set_map(two_pointer_filler_map());
  } else {
    filler->set_map(byte_array_map());
    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
  }
}


MaybeObject* Heap::AllocateExternalArray(int length,
                                         ExternalArrayType array_type,
                                         void* external_pointer,
                                         PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
                                            space,
                                            OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ExternalArray*>(result)->set_map(
      MapForExternalArrayType(array_type));
  reinterpret_cast<ExternalArray*>(result)->set_length(length);
  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
      external_pointer);

  return result;
}


MaybeObject* Heap::CreateCode(const CodeDesc& desc,
                              Code::Flags flags,
                              Handle<Object> self_reference,
                              bool immovable) {
  // Allocate the ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  Object* reloc_info;
  { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
    if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
  }

  // Compute size.
  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
  int obj_size = Code::SizeFor(body_size);
  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
  MaybeObject* maybe_result;
  // Large code objects and code objects which should stay at a fixed address
  // are allocated in large object space.
  if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
    maybe_result = lo_space_->AllocateRawCode(obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Initialize the object.
  HeapObject::cast(result)->set_map(code_map());
  Code* code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_info(ByteArray::cast(reloc_info));
  code->set_flags(flags);
  if (code->is_call_stub() || code->is_keyed_call_stub()) {
    code->set_check_type(RECEIVER_MAP_CHECK);
  }
  code->set_deoptimization_data(empty_fixed_array());
  code->set_next_code_flushing_candidate(undefined_value());
  // Allow self references to the created code object by patching the handle
  // to point to the newly allocated Code object.
  if (!self_reference.is_null()) {
    *(self_reference.location()) = code;
  }
  // Migrate generated code.
  // The generated code can contain Object** values (typically from handles)
  // that are dereferenced during the copy to point directly to the actual heap
  // objects. These pointers can include references to the code object itself,
  // through the self_reference parameter.
  code->CopyFrom(desc);

#ifdef DEBUG
  code->Verify();
#endif
  return code;
}


MaybeObject* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  MaybeObject* maybe_result;
  if (obj_size > MaxObjectSizeInPagedSpace()) {
    maybe_result = lo_space_->AllocateRawCode(obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  CopyBlock(new_addr, old_addr, obj_size);
  // Relocate the copy.
  Code* new_code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}


MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
  // Allocate the ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  Object* reloc_info_array;
  { MaybeObject* maybe_reloc_info_array =
        AllocateByteArray(reloc_info.length(), TENURED);
    if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
      return maybe_reloc_info_array;
    }
  }

  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);

  int new_obj_size = Code::SizeFor(new_body_size);

  Address old_addr = code->address();

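  // Everything from the object header up to the start of the relocation
  // info is copied verbatim below; the relocation info itself is then
  // replaced with the caller-supplied, already patched version.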
  size_t relocation_offset =
      static_cast<size_t>(code->instruction_end() - old_addr);

  MaybeObject* maybe_result;
  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
    maybe_result = lo_space_->AllocateRawCode(new_obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(new_obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy code object.
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();

  // Copy header and instructions.
  memcpy(new_addr, old_addr, relocation_offset);

  Code* new_code = Code::cast(result);
  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));

  // Copy patched rinfo.
  memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());

  // Relocate the copy.
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  new_code->Relocate(new_addr - old_addr);

#ifdef DEBUG
  code->Verify();
#endif
  return new_code;
}


MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
  // If allocation failures are disallowed, we may allocate in a different
  // space when new space is full and the object is not a large object.
  AllocationSpace retry_space =
      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(map->instance_size(), space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(map);
#ifdef ENABLE_LOGGING_AND_PROFILING
  isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
#endif
  return result;
}


MaybeObject* Heap::InitializeFunction(JSFunction* function,
                                      SharedFunctionInfo* shared,
                                      Object* prototype) {
  ASSERT(!prototype->IsMap());
  function->initialize_properties();
  function->initialize_elements();
  function->set_shared(shared);
  function->set_code(shared->code());
  function->set_prototype_or_initial_map(prototype);
  function->set_context(undefined_value());
  function->set_literals(empty_fixed_array());
  function->set_next_function_link(undefined_value());
  return function;
}


MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
  // Allocate the prototype. Make sure to use the object function
  // from the function's context, since the function can be from a
  // different context.
  JSFunction* object_function =
      function->context()->global_context()->object_function();
  Object* prototype;
  { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
    if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
  }
  // When creating the prototype for the function we must set its
  // constructor to the function.
  Object* result;
  { MaybeObject* maybe_result =
        JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
            constructor_symbol(), function, DONT_ENUM);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return prototype;
}


MaybeObject* Heap::AllocateFunction(Map* function_map,
                                    SharedFunctionInfo* shared,
                                    Object* prototype,
                                    PretenureFlag pretenure) {
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(function_map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return InitializeFunction(JSFunction::cast(result), shared, prototype);
}


MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
  // To get fast allocation and map sharing for arguments objects we
  // allocate them based on an arguments boilerplate.

  JSObject* boilerplate;
  int arguments_object_size;
  bool strict_mode_callee = callee->IsJSFunction() &&
      JSFunction::cast(callee)->shared()->strict_mode();
  if (strict_mode_callee) {
    boilerplate =
        isolate()->context()->global_context()->
            strict_mode_arguments_boilerplate();
    arguments_object_size = kArgumentsObjectSizeStrict;
  } else {
    boilerplate =
        isolate()->context()->global_context()->arguments_boilerplate();
    arguments_object_size = kArgumentsObjectSize;
  }
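  // (Strict mode arguments objects deliberately omit the callee property,
  // which is why their boilerplate is smaller; see the callee store below.)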

  // This calls Copy directly rather than using Heap::AllocateRaw so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  // Check that the size of the boilerplate matches our
  // expectations. The ArgumentsAccessStub::GenerateNewObject relies
  // on the size being a known constant.
  ASSERT(arguments_object_size == boilerplate->map()->instance_size());

  // Do the allocation.
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the content. The arguments boilerplate doesn't have any
  // fields that point to new space so it's safe to skip the write
  // barrier here.
  CopyBlock(HeapObject::cast(result)->address(),
            boilerplate->address(),
            JSObject::kHeaderSize);

  // Set the length property.
  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
                                                Smi::FromInt(length),
                                                SKIP_WRITE_BARRIER);
  // Set the callee property for non-strict mode arguments objects only.
  if (!strict_mode_callee) {
    JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
                                                  callee);
  }

  // Check the state of the object.
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastElements());

  return result;
}


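// Precondition: the descriptor array has been sorted by key (see the
// SortUnchecked() call in AllocateInitialMap below), so any duplicate keys
// are adjacent and a single linear scan suffices.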
static bool HasDuplicates(DescriptorArray* descriptors) {
  int count = descriptors->number_of_descriptors();
  if (count > 1) {
    String* prev_key = descriptors->GetKey(0);
    for (int i = 1; i != count; i++) {
      String* current_key = descriptors->GetKey(i);
      if (prev_key == current_key) return true;
      prev_key = current_key;
    }
  }
  return false;
}


MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map with the size and number of in-object properties
  // suggested by the function.
  int instance_size = fun->shared()->CalculateInstanceSize();
  int in_object_properties = fun->shared()->CalculateInObjectProperties();
  Object* map_obj;
  { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
    if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
  }

  // Fetch or allocate prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
      if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
    }
  }
  Map* map = Map::cast(map_obj);
  map->set_inobject_properties(in_object_properties);
  map->set_unused_property_fields(in_object_properties);
  map->set_prototype(prototype);
  ASSERT(map->has_fast_elements());

  // If the function has only simple this property assignments, add
  // field descriptors for these to the initial map as the object
  // cannot be constructed without having these properties. Guard by
  // the inline_new flag so we only change the map if we generate a
  // specialized construct stub.
  ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
  if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
    int count = fun->shared()->this_property_assignments_count();
    if (count > in_object_properties) {
      // Inline constructor can only handle inobject properties.
      fun->shared()->ForbidInlineConstructor();
    } else {
      Object* descriptors_obj;
      { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
        if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
          return maybe_descriptors_obj;
        }
      }
      DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
      for (int i = 0; i < count; i++) {
        String* name = fun->shared()->GetThisPropertyAssignmentName(i);
        ASSERT(name->IsSymbol());
        FieldDescriptor field(name, i, NONE);
        field.SetEnumerationIndex(i);
        descriptors->Set(i, &field);
      }
      descriptors->SetNextEnumerationIndex(count);
      descriptors->SortUnchecked();

      // The descriptors may contain duplicates because the compiler does not
      // guarantee the uniqueness of property names (it would have required
      // quadratic time). Once the descriptors are sorted we can check for
      // duplicates in linear time.
      if (HasDuplicates(descriptors)) {
        fun->shared()->ForbidInlineConstructor();
      } else {
        map->set_instance_descriptors(descriptors);
        map->set_pre_allocated_property_fields(count);
        map->set_unused_property_fields(in_object_properties - count);
      }
    }
  }

  fun->shared()->StartInobjectSlackTracking(map);

  return map;
}


void Heap::InitializeJSObjectFromMap(JSObject* obj,
                                     FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map. For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
  // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
  // verification code has to cope with (temporarily) invalid objects. See,
  // for example, JSArray::JSArrayVerify().
  Object* filler;
  // We cannot always fill with one_pointer_filler_map because objects
  // created from API functions expect their internal fields to be initialized
  // with undefined_value.
  if (map->constructor()->IsJSFunction() &&
      JSFunction::cast(map->constructor())->shared()->
          IsInobjectSlackTrackingInProgress()) {
    // We might want to shrink the object later.
    ASSERT(obj->GetInternalFieldCount() == 0);
    filler = Heap::one_pointer_filler_map();
  } else {
    filler = Heap::undefined_value();
  }
  obj->InitializeBody(map->instance_size(), filler);
}


MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  int prop_size =
      map->pre_allocated_property_fields() +
      map->unused_property_fields() -
      map->inobject_properties();
  ASSERT(prop_size >= 0);
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Allocate the JSObject.
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
  Object* obj;
  { MaybeObject* maybe_obj = Allocate(map, space);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  ASSERT(JSObject::cast(obj)->HasFastElements());
  return obj;
}


MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
                                    PretenureFlag pretenure) {
  // Allocate the initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map;
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
    }
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }
  // Allocate the object based on the constructor's initial map.
  MaybeObject* result =
      AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
#ifdef DEBUG
  // Make sure result is NOT a global object if valid.
  Object* non_failure;
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
#endif
  return result;
}


MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
  // Allocate map.
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
  // maps. Will probably depend on the identity of the handler object, too.
  Map* map;
  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
  map->set_prototype(prototype);
  map->set_pre_allocated_property_fields(1);
  map->set_inobject_properties(1);

  // Allocate the proxy object.
  Object* result;
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
  if (!maybe_result->ToObject(&result)) return maybe_result;
  JSProxy::cast(result)->set_handler(handler);
  return result;
}


MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Make sure no field properties are described in the initial map.
  // This guarantees us that normalizing the properties does not
  // require us to change property values to JSGlobalPropertyCells.
  ASSERT(map->NextFreePropertyIndex() == 0);

  // Make sure we don't have a ton of pre-allocated slots in the
  // global objects. They will be unused once we normalize the object.
  ASSERT(map->unused_property_fields() == 0);
  ASSERT(map->inobject_properties() == 0);

  // Initial size of the backing store to avoid resize of the storage during
  // bootstrapping. The size differs between the JS global object and the
  // builtins object.
  int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;

  // Allocate a dictionary object for backing storage.
  Object* obj;
  { MaybeObject* maybe_obj =
        StringDictionary::Allocate(
            map->NumberOfDescribedProperties() * 2 + initial_size);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  StringDictionary* dictionary = StringDictionary::cast(obj);

  // The global object might be created from an object template with accessors.
  // Fill these accessors into the dictionary.
  DescriptorArray* descs = map->instance_descriptors();
  for (int i = 0; i < descs->number_of_descriptors(); i++) {
    PropertyDetails details(descs->GetDetails(i));
    ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
    PropertyDetails d =
        PropertyDetails(details.attributes(), CALLBACKS, details.index());
    Object* value = descs->GetCallbacksObject(i);
    { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
      if (!maybe_value->ToObject(&value)) return maybe_value;
    }

    Object* result;
    { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    dictionary = StringDictionary::cast(result);
  }

  // Allocate the global object and initialize it with the backing store.
  { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  JSObject* global = JSObject::cast(obj);
  InitializeJSObjectFromMap(global, dictionary, map);

  // Create a new map for the global object.
  { MaybeObject* maybe_obj = map->CopyDropDescriptors();
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  Map* new_map = Map::cast(obj);

  // Set up the global object as a normalized object.
  global->set_map(new_map);
  global->map()->clear_instance_descriptors();
  global->set_properties(dictionary);

  // Make sure result is a global object with properties in dictionary.
  ASSERT(global->IsGlobalObject());
  ASSERT(!global->HasFastProperties());
  return global;
}


MaybeObject* Heap::CopyJSObject(JSObject* source) {
  // Never used to copy functions. If functions need to be copied we
  // have to be careful to clear the literals array.
  ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  if (always_allocate()) {
    { MaybeObject* maybe_clone =
          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(clone_address,
              source->address(),
              object_size);
    // Update write barrier for all fields that lie beyond the header.
    RecordWrites(clone_address,
                 JSObject::kHeaderSize,
                 (object_size - JSObject::kHeaderSize) / kPointerSize);
  } else {
    { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    ASSERT(InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(HeapObject::cast(clone)->address(),
              source->address(),
              object_size);
  }

  FixedArray* elements = FixedArray::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem;
    { MaybeObject* maybe_elem =
          (elements->map() == fixed_cow_array_map()) ?
          elements : CopyFixedArray(elements);
      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
    }
    JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop;
    { MaybeObject* maybe_prop = CopyFixedArray(properties);
      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
    }
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
  }
  // Return the new clone.
#ifdef ENABLE_LOGGING_AND_PROFILING
  isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
#endif
  return clone;
}


MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                             JSGlobalProxy* object) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Check that the already allocated object has the same size and type as
  // objects allocated using the constructor.
  ASSERT(map->instance_size() == object->map()->instance_size());
  ASSERT(map->instance_type() == object->map()->instance_type());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Reset the map for the object.
  object->set_map(constructor->initial_map());

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
  return object;
}


MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
                                           PretenureFlag pretenure) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRawAsciiString(string.length(), pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the characters into the new object.
  SeqAsciiString* string_result = SeqAsciiString::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->SeqAsciiStringSet(i, string[i]);
  }
  return result;
}


MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
                                              PretenureFlag pretenure) {
  // V8 only supports characters in the Basic Multilingual Plane.
  const uc32 kMaxSupportedChar = 0xFFFF;
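  // (Code points above U+FFFF therefore cannot be stored in a single uc16
  // element; the decode loop below substitutes unibrow::Utf8::kBadChar for
  // any such character rather than encoding a surrogate pair.)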
Steve Blocka7e24c12009-10-30 11:49:00 +00003437 // Count the number of characters in the UTF-8 string and check if
3438 // it is an ASCII string.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003439 Access<UnicodeCache::Utf8Decoder>
3440 decoder(isolate_->unicode_cache()->utf8_decoder());
Steve Blocka7e24c12009-10-30 11:49:00 +00003441 decoder->Reset(string.start(), string.length());
3442 int chars = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003443 while (decoder->has_more()) {
Steve Block9fac8402011-05-12 15:51:54 +01003444 decoder->GetNext();
Steve Blocka7e24c12009-10-30 11:49:00 +00003445 chars++;
3446 }
3447
John Reck59135872010-11-02 12:39:01 -07003448 Object* result;
3449 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3450 if (!maybe_result->ToObject(&result)) return maybe_result;
3451 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003452
3453 // Convert and copy the characters into the new object.
3454 String* string_result = String::cast(result);
3455 decoder->Reset(string.start(), string.length());
3456 for (int i = 0; i < chars; i++) {
3457 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003458 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003459 string_result->Set(i, r);
3460 }
3461 return result;
3462}
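
// The decoder runs twice: a first pass counts the code points so the
// backing store can be allocated exactly, and a second pass writes the
// characters. Code points above the BMP cannot be stored in a two-byte
// string here, so they are replaced instead of being encoded as surrogate
// pairs. Illustrative (assumed) effect: for the 4-byte UTF-8 sequence
// "\xF0\x9D\x84\x9E" (U+1D11E), GetNext() yields a value above 0xFFFF and
// the stored character becomes unibrow::Utf8::kBadChar.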


MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
                                             PretenureFlag pretenure) {
  // Check if the string is an ASCII string.
  MaybeObject* maybe_result;
  if (String::IsAscii(string.start(), string.length())) {
    maybe_result = AllocateRawAsciiString(string.length(), pretenure);
  } else {  // It's not an ASCII string.
    maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
  }
  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy the characters into the new object, which may be either ASCII or
  // UTF-16.
  String* string_result = String::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->Set(i, string[i]);
  }
  return result;
}


Map* Heap::SymbolMapForString(String* string) {
  // If the string is in new space it cannot be used as a symbol.
  if (InNewSpace(string)) return NULL;

  // Find the corresponding symbol map for strings.
  Map* map = string->map();
  if (map == ascii_string_map()) {
    return ascii_symbol_map();
  }
  if (map == string_map()) {
    return symbol_map();
  }
  if (map == cons_string_map()) {
    return cons_symbol_map();
  }
  if (map == cons_ascii_string_map()) {
    return cons_ascii_symbol_map();
  }
  if (map == external_string_map()) {
    return external_symbol_map();
  }
  if (map == external_ascii_string_map()) {
    return external_ascii_symbol_map();
  }
  if (map == external_string_with_ascii_data_map()) {
    return external_symbol_with_ascii_data_map();
  }

  // No match found.
  return NULL;
}
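
// Each concrete string representation has a parallel symbol map, so an
// old-space string can be turned into a symbol in place just by swapping
// its map; no characters are copied. For example, a sequential two-byte
// string using string_map() becomes a symbol under symbol_map(). A NULL
// result tells the caller to fall back to allocating a fresh symbol.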


MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
                                          int chars,
                                          uint32_t hash_field) {
  ASSERT(chars >= 0);
  // Ensure the chars matches the number of characters in the buffer.
  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
  // Determine whether the string is ascii.
  bool is_ascii = true;
  while (buffer->has_more()) {
    if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
      is_ascii = false;
      break;
    }
  }
  buffer->Rewind();

  // Compute map and object size.
  int size;
  Map* map;

  if (is_ascii) {
    if (chars > SeqAsciiString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = ascii_symbol_map();
    size = SeqAsciiString::SizeFor(chars);
  } else {
    if (chars > SeqTwoByteString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = symbol_map();
    size = SeqTwoByteString::SizeFor(chars);
  }

  // Allocate string.
  Object* result;
  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
                   ? lo_space_->AllocateRaw(size)
                   : old_data_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<HeapObject*>(result)->set_map(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(chars);
  answer->set_hash_field(hash_field);

  ASSERT_EQ(size, answer->Size());

  // Fill in the characters.
  for (int i = 0; i < chars; i++) {
    answer->Set(i, buffer->GetNext());
  }
  return answer;
}


MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
  if (length < 0 || length > SeqAsciiString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  int size = SeqAsciiString::SizeFor(length);
  ASSERT(size <= SeqAsciiString::kMaxSize);

  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(ascii_string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}
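
// The space selection above, summarized (AllocateRawTwoByteString below
// uses the same logic):
//
//   pretenure    size vs. limits                     space / retry space
//   NOT_TENURED  size > kMaxObjectSizeInNewSpace     LO_SPACE
//   NOT_TENURED  size > MaxObjectSizeInPagedSpace()  NEW_SPACE / LO_SPACE
//   NOT_TENURED  otherwise                           NEW_SPACE / OLD_DATA_SPACE
//   TENURED      size > MaxObjectSizeInPagedSpace()  LO_SPACE
//   TENURED      otherwise                           OLD_DATA_SPACE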


MaybeObject* Heap::AllocateRawTwoByteString(int length,
                                            PretenureFlag pretenure) {
  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = SeqTwoByteString::SizeFor(length);
  ASSERT(size <= SeqTwoByteString::kMaxSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


MaybeObject* Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
  reinterpret_cast<FixedArray*>(result)->set_length(0);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  ASSERT(length > 0);
  // Use the general function if we're forced to always allocate.
  if (always_allocate()) return AllocateFixedArray(length, TENURED);
  // Allocate the raw data for a fixed array.
  int size = FixedArray::SizeFor(length);
  return size <= kMaxObjectSizeInNewSpace
      ? new_space_.AllocateRaw(size)
      : lo_space_->AllocateRawFixedArray(size);
}
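
// This raw path skips the pretenuring decision: small arrays go to new
// space, anything larger goes straight to large object space, and the map
// and length header is left for the caller to fill in. Illustrative size
// arithmetic: FixedArray::SizeFor(n) is the header plus n pointers, so on
// a 32-bit build an array of 4 elements takes 8 + 4 * 4 = 24 bytes.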


MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  if (InNewSpace(obj)) {
    HeapObject* dst = HeapObject::cast(obj);
    dst->set_map(map);
    CopyBlock(dst->address() + kPointerSize,
              src->address() + kPointerSize,
              FixedArray::SizeFor(len) - kPointerSize);
    return obj;
  }
  HeapObject::cast(obj)->set_map(map);
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the content.
  AssertNoAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}
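
// The two branches differ because of the write barrier: a copy that lands
// in new space cannot create old-to-new pointers, so a raw CopyBlock of
// the body (everything past the map word) is safe. A copy that lands in
// old space must go through set() with the WriteBarrierMode obtained under
// AssertNoAllocation, so that any new-space targets are recorded for the
// next scavenge.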


MaybeObject* Heap::AllocateFixedArray(int length) {
  ASSERT(length >= 0);
  if (length == 0) return empty_fixed_array();
  Object* result;
  { MaybeObject* maybe_result = AllocateRawFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize header.
  FixedArray* array = reinterpret_cast<FixedArray*>(result);
  array->set_map(fixed_array_map());
  array->set_length(length);
  // Initialize body.
  ASSERT(!InNewSpace(undefined_value()));
  MemsetPointer(array->data_start(), undefined_value(), length);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  int size = FixedArray::SizeFor(length);
  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_POINTER_SPACE &&
             size > MaxObjectSizeInPagedSpace()) {
    // Too big for old pointer space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;

  return AllocateRaw(size, space, retry_space);
}


MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
    Heap* heap,
    int length,
    PretenureFlag pretenure,
    Object* filler) {
  ASSERT(length >= 0);
  ASSERT(heap->empty_fixed_array()->IsFixedArray());
  if (length == 0) return heap->empty_fixed_array();

  ASSERT(!heap->InNewSpace(filler));
  Object* result;
  { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(heap->fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  MemsetPointer(array->data_start(), filler, length);
  return array;
}
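
// The filler distinguishes the two public variants that follow: plain
// fixed arrays start out filled with undefined_value(), while
// AllocateFixedArrayWithHoles uses the_hole_value(), which (as used
// elsewhere in the heap) lets absent entries be told apart from entries
// explicitly set to undefined. The filler must not live in new space,
// since MemsetPointer performs no write barrier.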


MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      undefined_value());
}


MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
                                               PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      the_hole_value());
}


MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
  if (length == 0) return empty_fixed_array();

  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
  FixedArray::cast(obj)->set_length(length);
  return obj;
}


MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
  ASSERT(result->IsHashTable());
  return result;
}


MaybeObject* Heap::AllocateGlobalContext() {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(global_context_map());
  ASSERT(context->IsGlobalContext());
  ASSERT(result->IsContext());
  return result;
}


MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(context_map());
  context->set_closure(function);
  context->set_fcontext(context);
  context->set_previous(NULL);
  context->set_extension(NULL);
  context->set_global(function->context()->global());
  ASSERT(!context->IsGlobalContext());
  ASSERT(context->is_function_context());
  ASSERT(result->IsContext());
  return result;
}


MaybeObject* Heap::AllocateWithContext(Context* previous,
                                       JSObject* extension,
                                       bool is_catch_context) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(is_catch_context ? catch_context_map() :
      context_map());
  context->set_closure(previous->closure());
  context->set_fcontext(previous->fcontext());
  context->set_previous(previous);
  context->set_extension(extension);
  context->set_global(previous->global());
  ASSERT(!context->IsGlobalContext());
  ASSERT(!context->is_function_context());
  ASSERT(result->IsContext());
  return result;
}


MaybeObject* Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) \
    case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return Failure::InternalError();
  }
  int size = map->instance_size();
  AllocationSpace space =
      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Struct::cast(result)->InitializeBody(size);
  return result;
}
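
// STRUCT_LIST(MAKE_CASE) expands into one case per struct type declared in
// objects.h. For instance, given an entry of the form
// V(ACCESSOR_INFO, AccessorInfo, accessor_info), the generated case reads:
//
//   case ACCESSOR_INFO_TYPE: map = accessor_info_map(); break;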


bool Heap::IdleNotification() {
  static const int kIdlesBeforeScavenge = 4;
  static const int kIdlesBeforeMarkSweep = 7;
  static const int kIdlesBeforeMarkCompact = 8;
  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
  static const unsigned int kGCsBetweenCleanup = 4;

  if (!last_idle_notification_gc_count_init_) {
    last_idle_notification_gc_count_ = gc_count_;
    last_idle_notification_gc_count_init_ = true;
  }

  bool uncommit = true;
  bool finished = false;

  // Reset the number of idle notifications received when a number of
  // GCs have taken place. This allows another round of cleanup based
  // on idle notifications if enough work has been carried out to
  // provoke a number of garbage collections.
  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
    number_idle_notifications_ =
        Min(number_idle_notifications_ + 1, kMaxIdleCount);
  } else {
    number_idle_notifications_ = 0;
    last_idle_notification_gc_count_ = gc_count_;
  }

  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
    if (contexts_disposed_ > 0) {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(false);
    } else {
      CollectGarbage(NEW_SPACE);
    }
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
    // Before doing the mark-sweep collections we clear the
    // compilation cache to avoid hanging on to source code and
    // generated code for cached functions.
    isolate_->compilation_cache()->Clear();

    CollectAllGarbage(false);
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;

  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
    CollectAllGarbage(true);
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
    number_idle_notifications_ = 0;
    finished = true;
  } else if (contexts_disposed_ > 0) {
    if (FLAG_expose_gc) {
      contexts_disposed_ = 0;
    } else {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(false);
      last_idle_notification_gc_count_ = gc_count_;
    }
    // If this is the first idle notification, we reset the
    // notification count to avoid letting idle notifications for
    // context disposal garbage collections start a potentially too
    // aggressive idle GC cycle.
    if (number_idle_notifications_ <= 1) {
      number_idle_notifications_ = 0;
      uncommit = false;
    }
  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
    // If we have received more than kIdlesBeforeMarkCompact idle
    // notifications we do not perform any cleanup because we don't
    // expect to gain much by doing so.
    finished = true;
  }

  // Make sure that we have no pending context disposals and
  // conditionally uncommit from space.
  ASSERT(contexts_disposed_ == 0);
  if (uncommit) UncommitFromSpace();
  return finished;
}
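
// Escalation implied by the constants above: as long as fewer than
// kGCsBetweenCleanup GCs happen between calls, consecutive idle
// notifications accumulate. The 4th triggers a scavenge (or a full GC if
// contexts were disposed), the 7th a mark-sweep with the compilation cache
// cleared, and the 8th a compacting collection that ends the cycle and
// returns true, signalling the embedder that no further idle work is
// expected.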


#ifdef DEBUG

void Heap::Print() {
  if (!HasBeenSetup()) return;
  isolate()->PrintStack();
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next())
    space->Print();
}


void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics();
  // We do not look for code in new space, map space, or old space. If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics();
}


// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
         title, gc_count_);
  PrintF("mark-compact GC : %d\n", mc_count_);
  PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_promotion_limit_);
  PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_allocation_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
  isolate_->global_handles()->PrintStats();
  PrintF("\n");

  PrintF("Heap statistics : ");
  isolate_->memory_allocator()->ReportStatistics();
  PrintF("To space : ");
  new_space_.ReportStatistics();
  PrintF("Old pointer space : ");
  old_pointer_space_->ReportStatistics();
  PrintF("Old data space : ");
  old_data_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Cell space : ");
  cell_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}

#endif  // DEBUG

bool Heap::Contains(HeapObject* value) {
  return Contains(value->address());
}


bool Heap::Contains(Address addr) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  return HasBeenSetup() &&
         (new_space_.ToSpaceContains(addr) ||
          old_pointer_space_->Contains(addr) ||
          old_data_space_->Contains(addr) ||
          code_space_->Contains(addr) ||
          map_space_->Contains(addr) ||
          cell_space_->Contains(addr) ||
          lo_space_->SlowContains(addr));
}


bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  return InSpace(value->address(), space);
}


bool Heap::InSpace(Address addr, AllocationSpace space) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  if (!HasBeenSetup()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_.ToSpaceContains(addr);
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Contains(addr);
    case OLD_DATA_SPACE:
      return old_data_space_->Contains(addr);
    case CODE_SPACE:
      return code_space_->Contains(addr);
    case MAP_SPACE:
      return map_space_->Contains(addr);
    case CELL_SPACE:
      return cell_space_->Contains(addr);
    case LO_SPACE:
      return lo_space_->SlowContains(addr);
  }

  return false;
}


#ifdef DEBUG
static void DummyScavengePointer(HeapObject** p) {
}


static void VerifyPointersUnderWatermark(
    PagedSpace* space,
    DirtyRegionCallback visit_dirty_region) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  while (it.has_next()) {
    Page* page = it.next();
    Address start = page->ObjectAreaStart();
    Address end = page->AllocationWatermark();

    HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
                              start,
                              end,
                              visit_dirty_region,
                              &DummyScavengePointer);
  }
}


static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC, the Heap::InNewSpace() predicate itself
        // checks that any pointer satisfying it points into the active
        // semispace, which is why its result is deliberately ignored here.
        HEAP->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}


void Heap::Verify() {
  ASSERT(HasBeenSetup());

  VerifyPointersVisitor visitor;
  IterateRoots(&visitor, VISIT_ONLY_STRONG);

  new_space_.Verify();

  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
  old_pointer_space_->Verify(&dirty_regions_visitor);
  map_space_->Verify(&dirty_regions_visitor);

  VerifyPointersUnderWatermark(old_pointer_space_,
                               &IteratePointersInDirtyRegion);
  VerifyPointersUnderWatermark(map_space_,
                               &IteratePointersInDirtyMapsRegion);
  VerifyPointersUnderWatermark(lo_space_);

  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);

  VerifyPointersVisitor no_dirty_regions_visitor;
  old_data_space_->Verify(&no_dirty_regions_visitor);
  code_space_->Verify(&no_dirty_regions_visitor);
  cell_space_->Verify(&no_dirty_regions_visitor);

  lo_space_->Verify();
}
#endif  // DEBUG


MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupAsciiSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
                                     int from,
                                     int length) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupSubStringAsciiSymbol(string,
                                                   from,
                                                   length,
                                                   &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupTwoByteSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupSymbol(String* string) {
  if (string->IsSymbol()) return string;
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupString(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
  if (string->IsSymbol()) {
    *symbol = string;
    return true;
  }
  return symbol_table()->LookupSymbolIfExists(string, symbol);
}

#ifdef DEBUG
void Heap::ZapFromSpace() {
  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
  for (Address a = new_space_.FromSpaceLow();
       a < new_space_.FromSpaceHigh();
       a += kPointerSize) {
    Memory::Address_at(a) = kFromSpaceZapValue;
  }
}
#endif  // DEBUG


bool Heap::IteratePointersInDirtyRegion(Heap* heap,
                                        Address start,
                                        Address end,
                                        ObjectSlotCallback copy_object_func) {
  Address slot_address = start;
  bool pointers_to_new_space_found = false;

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap->InNewSpace(*slot)) {
      ASSERT((*slot)->IsHeapObject());
      copy_object_func(reinterpret_cast<HeapObject**>(slot));
      if (heap->InNewSpace(*slot)) {
        ASSERT((*slot)->IsHeapObject());
        pointers_to_new_space_found = true;
      }
    }
    slot_address += kPointerSize;
  }
  return pointers_to_new_space_found;
}


// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->ObjectAreaStart();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


static bool IteratePointersInDirtyMaps(Address start,
                                       Address end,
                                       ObjectSlotCallback copy_object_func) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  bool pointers_to_new_space_found = false;

  Heap* heap = HEAP;
  while (map_address < end) {
    ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    if (Heap::IteratePointersInDirtyRegion(heap,
                                           pointer_fields_start,
                                           pointer_fields_end,
                                           copy_object_func)) {
      pointers_to_new_space_found = true;
    }

    map_address += Map::kSize;
  }

  return pointers_to_new_space_found;
}


bool Heap::IteratePointersInDirtyMapsRegion(
    Heap* heap,
    Address start,
    Address end,
    ObjectSlotCallback copy_object_func) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  bool contains_pointers_to_new_space = false;

  if (map_aligned_start != start) {
    Address prev_map = map_aligned_start - Map::kSize;
    ASSERT(Memory::Object_at(prev_map)->IsMap());

    Address pointer_fields_start =
        Max(start, prev_map + Map::kPointerFieldsBeginOffset);

    Address pointer_fields_end =
        Min(prev_map + Map::kPointerFieldsEndOffset, end);

    contains_pointers_to_new_space =
      IteratePointersInDirtyRegion(heap,
                                   pointer_fields_start,
                                   pointer_fields_end,
                                   copy_object_func)
        || contains_pointers_to_new_space;
  }

  contains_pointers_to_new_space =
    IteratePointersInDirtyMaps(map_aligned_start,
                               map_aligned_end,
                               copy_object_func)
      || contains_pointers_to_new_space;

  if (map_aligned_end != end) {
    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());

    Address pointer_fields_start =
        map_aligned_end + Map::kPointerFieldsBeginOffset;

    Address pointer_fields_end =
        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);

    contains_pointers_to_new_space =
      IteratePointersInDirtyRegion(heap,
                                   pointer_fields_start,
                                   pointer_fields_end,
                                   copy_object_func)
        || contains_pointers_to_new_space;
  }

  return contains_pointers_to_new_space;
}


void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                             Address end,
                                             ObjectSlotCallback callback) {
  Address slot_address = start;
  Page* page = Page::FromAddress(start);

  uint32_t marks = page->GetRegionMarks();

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (InFromSpace(*slot)) {
      ASSERT((*slot)->IsHeapObject());
      callback(reinterpret_cast<HeapObject**>(slot));
      if (InNewSpace(*slot)) {
        ASSERT((*slot)->IsHeapObject());
        marks |= page->GetRegionMaskForAddress(slot_address);
      }
    }
    slot_address += kPointerSize;
  }

  page->SetRegionMarks(marks);
}


uint32_t Heap::IterateDirtyRegions(
    uint32_t marks,
    Address area_start,
    Address area_end,
    DirtyRegionCallback visit_dirty_region,
    ObjectSlotCallback copy_object_func) {
  uint32_t newmarks = 0;
  uint32_t mask = 1;

  if (area_start >= area_end) {
    return newmarks;
  }

  Address region_start = area_start;

  // area_start does not necessarily coincide with the start of the first
  // region. Thus to calculate the beginning of the next region we have to
  // align area_start by Page::kRegionSize.
  Address second_region =
      reinterpret_cast<Address>(
          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
          ~Page::kRegionAlignmentMask);

  // The next region might be beyond area_end.
  Address region_end = Min(second_region, area_end);

  if (marks & mask) {
    if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
      newmarks |= mask;
    }
  }
  mask <<= 1;

  // Iterate over the subsequent regions which lie fully inside
  // [area_start, area_end).
  region_start = region_end;
  region_end = region_start + Page::kRegionSize;

  while (region_end <= area_end) {
    if (marks & mask) {
      if (visit_dirty_region(this,
                             region_start,
                             region_end,
                             copy_object_func)) {
        newmarks |= mask;
      }
    }

    region_start = region_end;
    region_end = region_start + Page::kRegionSize;

    mask <<= 1;
  }

  if (region_start != area_end) {
    // A small piece of the area was left unvisited because area_end does
    // not coincide with a region end. Check whether the region covering
    // the last part of the area is dirty.
    if (marks & mask) {
      if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
        newmarks |= mask;
      }
    }
  }

  return newmarks;
}
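
// The marks word is a per-page dirty bitmap: bit i covers the i-th
// Page::kRegionSize-sized region, and a bit survives into the returned
// word only if its region still holds a pointer into new space after
// copy_object_func has run. Illustrative arithmetic (hypothetical values):
// with a region size of 256 bytes, a slot at page offset 0x520 falls into
// region 5, so its mark is the bit 1 << 5.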



void Heap::IterateDirtyRegions(
    PagedSpace* space,
    DirtyRegionCallback visit_dirty_region,
    ObjectSlotCallback copy_object_func,
    ExpectedPageWatermarkState expected_page_watermark_state) {

  PageIterator it(space, PageIterator::PAGES_IN_USE);

  while (it.has_next()) {
    Page* page = it.next();
    uint32_t marks = page->GetRegionMarks();

    if (marks != Page::kAllRegionsCleanMarks) {
      Address start = page->ObjectAreaStart();

      // Do not try to visit pointers beyond page allocation watermark.
      // Page can contain garbage pointers there.
      Address end;

      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
          page->IsWatermarkValid()) {
        end = page->AllocationWatermark();
      } else {
        end = page->CachedAllocationWatermark();
      }

      ASSERT(space == old_pointer_space_ ||
             (space == map_space_ &&
              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));

      page->SetRegionMarks(IterateDirtyRegions(marks,
                                               start,
                                               end,
                                               visit_dirty_region,
                                               copy_object_func));
    }

    // Mark page watermark as invalid to maintain watermark validity invariant.
    // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
    page->InvalidateWatermark(true);
  }
}


void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
  IterateStrongRoots(v, mode);
  IterateWeakRoots(v, mode);
}


void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
  v->Synchronize("symbol_table");
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    // Scavenge collections have special processing for this.
    external_string_table_.Iterate(v);
  }
  v->Synchronize("external_string_table");
}


void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
  v->Synchronize("strong_root_list");

  v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
  v->Synchronize("symbol");

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize("bootstrapper");
  isolate_->Iterate(v);
  v->Synchronize("top");
  Relocatable::Iterate(v);
  v->Synchronize("relocatable");

#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->Iterate(v);
#endif
  v->Synchronize("debug");
  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize("compilationcache");

  // Iterate over local handles in handle scopes.
  isolate_->handle_scope_implementer()->Iterate(v);
  v->Synchronize("handlescope");

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    isolate_->builtins()->IterateBuiltins(v);
  }
  v->Synchronize("builtins");

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize("globalhandles");

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize("threadmanager");

  // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
  // During garbage collection this keeps the partial snapshot cache alive.
  // During deserialization of the startup snapshot this creates the partial
  // snapshot cache and deserializes the objects it refers to. During
  // serialization this does nothing, since the partial snapshot cache is
  // empty. However the next thing we do is create the partial snapshot,
  // filling up the partial snapshot cache with objects it needs as we go.
  SerializerDeserializer::Iterate(v);
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot. However at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}
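
// Restating the VisitMode behaviour across IterateWeakRoots and the switch
// above: VISIT_ONLY_STRONG visits only strong global handles,
// VISIT_ALL_IN_SCAVENGE adds the dependent roots, and VISIT_ALL together
// with VISIT_ALL_IN_SWEEP_NEWSPACE visit every global handle. Builtins and
// the external string table are skipped in the scavenge and new-space
// sweep modes: the scavenger processes the external string table
// separately, and code objects never live in new space.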


// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
                         int max_old_gen_size,
                         int max_executable_size) {
  if (HasBeenSetup()) return false;

  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;

  if (Snapshot::IsEnabled()) {
    // If we are using a snapshot we always reserve the default amount
    // of memory for each semispace because code in the snapshot has
    // write-barrier code that relies on the size and alignment of new
    // space. We therefore cannot use a larger max semispace size
    // than the default reserved semispace size.
    if (max_semispace_size_ > reserved_semispace_size_) {
      max_semispace_size_ = reserved_semispace_size_;
    }
  } else {
    // If we are not using snapshots we reserve space for the actual
    // max semispace size.
    reserved_semispace_size_ = max_semispace_size_;
  }

  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
  if (max_executable_size > 0) {
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
  }

  // The max executable size must be less than or equal to the max old
  // generation size.
  if (max_executable_size_ > max_old_generation_size_) {
    max_executable_size_ = max_old_generation_size_;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
  external_allocation_limit_ = 10 * max_semispace_size_;

  // The old generation is paged.
  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);

  configured_ = true;
  return true;
}
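
// Worked example (hypothetical inputs): with no snapshot,
// ConfigureHeap(600 * KB, 0, 0) sets the semispace limit to the next power
// of two, 1MB, leaves the old-generation and executable limits at their
// previous values, and sets external_allocation_limit_ to 10MB. Passing 0
// for a parameter always keeps the corresponding limit unchanged.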


bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
                       FLAG_max_old_space_size * MB,
                       FLAG_max_executable_size * MB);
}


void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
  *stats->start_marker = HeapStats::kStartMarker;
  *stats->end_marker = HeapStats::kEndMarker;
  *stats->new_space_size = new_space_.SizeAsInt();
  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
  *stats->old_pointer_space_size = old_pointer_space_->Size();
  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
  *stats->old_data_space_size = old_data_space_->Size();
  *stats->old_data_space_capacity = old_data_space_->Capacity();
  *stats->code_space_size = code_space_->Size();
  *stats->code_space_capacity = code_space_->Capacity();
  *stats->map_space_size = map_space_->Size();
  *stats->map_space_capacity = map_space_->Capacity();
  *stats->cell_space_size = cell_space_->Size();
  *stats->cell_space_capacity = cell_space_->Capacity();
  *stats->lo_space_size = lo_space_->Size();
  isolate_->global_handles()->RecordStats(stats);
  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
  *stats->memory_allocator_capacity =
      isolate()->memory_allocator()->Size() +
      isolate()->memory_allocator()->Available();
  *stats->os_error = OS::GetLastError();
  if (take_snapshot) {
    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      InstanceType type = obj->map()->instance_type();
      ASSERT(0 <= type && type <= LAST_TYPE);
      stats->objects_per_type[type]++;
      stats->size_per_type[type] += obj->Size();
    }
  }
}
4700
4701
Ben Murdochf87a2032010-10-22 12:50:53 +01004702intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004703 return old_pointer_space_->Size()
4704 + old_data_space_->Size()
4705 + code_space_->Size()
4706 + map_space_->Size()
4707 + cell_space_->Size()
4708 + lo_space_->Size();
4709}
4710
4711
4712int Heap::PromotedExternalMemorySize() {
4713 if (amount_of_external_allocated_memory_
4714 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4715 return amount_of_external_allocated_memory_
4716 - amount_of_external_allocated_memory_at_last_global_gc_;
4717}
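// Numeric check of the clamp above: with 12 MB of external memory now and
// 10 MB at the last global GC, the function reports 2 MB; if usage has
// dropped to 8 MB it reports 0 rather than a negative delta.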
4718
Steve Block44f0eee2011-05-26 01:26:41 +01004719#ifdef DEBUG
4720
4721// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObjects.
4722static const int kMarkTag = 2;
4723
4724
4725class HeapDebugUtils {
4726 public:
4727 explicit HeapDebugUtils(Heap* heap)
4728 : search_for_any_global_(false),
4729 search_target_(NULL),
4730 found_target_(false),
4731 object_stack_(20),
4732 heap_(heap) {
4733 }
4734
4735 class MarkObjectVisitor : public ObjectVisitor {
4736 public:
4737 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4738
4739 void VisitPointers(Object** start, Object** end) {
4740 // Mark all HeapObject pointers in [start, end).
4741 for (Object** p = start; p < end; p++) {
4742 if ((*p)->IsHeapObject())
4743 utils_->MarkObjectRecursively(p);
4744 }
4745 }
4746
4747 HeapDebugUtils* utils_;
4748 };
4749
4750 void MarkObjectRecursively(Object** p) {
4751 if (!(*p)->IsHeapObject()) return;
4752
4753 HeapObject* obj = HeapObject::cast(*p);
4754
4755 Object* map = obj->map();
4756
4757 if (!map->IsHeapObject()) return; // visited before
4758
4759 if (found_target_) return; // stop if target found
4760 object_stack_.Add(obj);
4761 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
4762 (!search_for_any_global_ && (obj == search_target_))) {
4763 found_target_ = true;
4764 return;
4765 }
4766
4767 // not visited yet
4768 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
4769
4770 Address map_addr = map_p->address();
4771
4772 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
4773
4774 MarkObjectRecursively(&map);
4775
4776 MarkObjectVisitor mark_visitor(this);
4777
4778 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
4779 &mark_visitor);
4780
4781 if (!found_target_) // don't pop if found the target
4782 object_stack_.RemoveLast();
4783 }
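// The mark bit is encoded in the map word: adding kMarkTag to the aligned
// map address produces a value that fails the map->IsHeapObject() check,
// which is how "visited before" is detected above. Sketch of the round trip:
//
//   Address map_addr = map_p->address();                        // tag-aligned
//   obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));  // mark
//   ...
//   obj->set_map(reinterpret_cast<Map*>(map_addr));             // unmark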
4784
4785
4786 class UnmarkObjectVisitor : public ObjectVisitor {
4787 public:
4788 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4789
4790 void VisitPointers(Object** start, Object** end) {
4791 // Unmark all HeapObject pointers in [start, end).
4792 for (Object** p = start; p < end; p++) {
4793 if ((*p)->IsHeapObject())
4794 utils_->UnmarkObjectRecursively(p);
4795 }
4796 }
4797
4798 HeapDebugUtils* utils_;
4799 };
4800
4801
4802 void UnmarkObjectRecursively(Object** p) {
4803 if (!(*p)->IsHeapObject()) return;
4804
4805 HeapObject* obj = HeapObject::cast(*p);
4806
4807 Object* map = obj->map();
4808
4809 if (map->IsHeapObject()) return; // unmarked already
4810
4811 Address map_addr = reinterpret_cast<Address>(map);
4812
4813 map_addr -= kMarkTag;
4814
4815 ASSERT_TAG_ALIGNED(map_addr);
4816
4817 HeapObject* map_p = HeapObject::FromAddress(map_addr);
4818
4819 obj->set_map(reinterpret_cast<Map*>(map_p));
4820
4821 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
4822
4823 UnmarkObjectVisitor unmark_visitor(this);
4824
4825 obj->IterateBody(Map::cast(map_p)->instance_type(),
4826 obj->SizeFromMap(Map::cast(map_p)),
4827 &unmark_visitor);
4828 }
4829
4830
4831 void MarkRootObjectRecursively(Object** root) {
4832 if (search_for_any_global_) {
4833 ASSERT(search_target_ == NULL);
4834 } else {
4835 ASSERT(search_target_->IsHeapObject());
4836 }
4837 found_target_ = false;
4838 object_stack_.Clear();
4839
4840 MarkObjectRecursively(root);
4841 UnmarkObjectRecursively(root);
4842
4843 if (found_target_) {
4844 PrintF("=====================================\n");
4845 PrintF("==== Path to object ====\n");
4846 PrintF("=====================================\n\n");
4847
4848 ASSERT(!object_stack_.is_empty());
4849 for (int i = 0; i < object_stack_.length(); i++) {
4850 if (i > 0) PrintF("\n |\n |\n V\n\n");
4851 Object* obj = object_stack_[i];
4852 obj->Print();
4853 }
4854 PrintF("=====================================\n");
4855 }
4856 }
4857
4858 // Helper class for visiting HeapObjects recursively.
4859 class MarkRootVisitor: public ObjectVisitor {
4860 public:
4861 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4862
4863 void VisitPointers(Object** start, Object** end) {
4864 // Visit all HeapObject pointers in [start, end)
4865 for (Object** p = start; p < end; p++) {
4866 if ((*p)->IsHeapObject())
4867 utils_->MarkRootObjectRecursively(p);
4868 }
4869 }
4870
4871 HeapDebugUtils* utils_;
4872 };
4873
4874 bool search_for_any_global_;
4875 Object* search_target_;
4876 bool found_target_;
4877 List<Object*> object_stack_;
4878 Heap* heap_;
4879
4880 friend class Heap;
4881};
4882
4883#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00004884
4885bool Heap::Setup(bool create_heap_objects) {
Steve Block44f0eee2011-05-26 01:26:41 +01004886#ifdef DEBUG
4887 debug_utils_ = new HeapDebugUtils(this);
4888#endif
4889
Steve Blocka7e24c12009-10-30 11:49:00 +00004890 // Initialize heap spaces and initial maps and objects. Whenever something
4891 // goes wrong, just return false. The caller should check the results and
4892 // call Heap::TearDown() to release allocated memory.
4893 //
4894 // If the heap is not yet configured (e.g., through the API), configure it.
4895 // Configuration is based on the flags new-space-size (really the semispace
4896 // size) and old-space-size if set or the initial values of semispace_size_
4897 // and old_generation_size_ otherwise.
Steve Block44f0eee2011-05-26 01:26:41 +01004898 if (!configured_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004899 if (!ConfigureHeapDefault()) return false;
4900 }
4901
Steve Block44f0eee2011-05-26 01:26:41 +01004902 gc_initializer_mutex->Lock();
4903 static bool initialized_gc = false;
4904 if (!initialized_gc) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01004905 initialized_gc = true;
4906 InitializeScavengingVisitorsTables();
4907 NewSpaceScavenger::Initialize();
4908 MarkCompactCollector::Initialize();
Steve Block44f0eee2011-05-26 01:26:41 +01004909 }
4910 gc_initializer_mutex->Unlock();
Iain Merrick75681382010-08-19 15:07:18 +01004911
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004912 MarkMapPointersAsEncoded(false);
4913
Steve Blocka7e24c12009-10-30 11:49:00 +00004914 // Set up the memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004915 // space. The chunk is double the size of the requested reserved
4916 // new space size to ensure that we can find a pair of semispaces that
4917 // are contiguous and aligned to their size.
Steve Block44f0eee2011-05-26 01:26:41 +01004918 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
4919 return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004920 void* chunk =
Steve Block44f0eee2011-05-26 01:26:41 +01004921 isolate_->memory_allocator()->ReserveInitialChunk(
4922 4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004923 if (chunk == NULL) return false;
4924
4925 // Align the pair of semispaces to their size, which must be a power
4926 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004927 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004928 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4929 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4930 return false;
4931 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004932
4933 // Initialize old pointer space.
4934 old_pointer_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004935 new OldSpace(this,
4936 max_old_generation_size_,
4937 OLD_POINTER_SPACE,
4938 NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004939 if (old_pointer_space_ == NULL) return false;
4940 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4941
4942 // Initialize old data space.
4943 old_data_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004944 new OldSpace(this,
4945 max_old_generation_size_,
4946 OLD_DATA_SPACE,
4947 NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004948 if (old_data_space_ == NULL) return false;
4949 if (!old_data_space_->Setup(NULL, 0)) return false;
4950
4951 // Initialize the code space, set its maximum capacity to the old
4952 // generation size. It needs executable memory.
4953 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4954 // virtual address space, so that they can call each other with near calls.
4955 if (code_range_size_ > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01004956 if (!isolate_->code_range()->Setup(code_range_size_)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004957 return false;
4958 }
4959 }
4960
4961 code_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004962 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004963 if (code_space_ == NULL) return false;
4964 if (!code_space_->Setup(NULL, 0)) return false;
4965
4966 // Initialize map space.
Steve Block44f0eee2011-05-26 01:26:41 +01004967 map_space_ = new MapSpace(this, FLAG_use_big_map_space
Leon Clarkee46be812010-01-19 14:06:41 +00004968 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004969 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4970 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004971 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004972 if (map_space_ == NULL) return false;
4973 if (!map_space_->Setup(NULL, 0)) return false;
4974
4975 // Initialize global property cell space.
Steve Block44f0eee2011-05-26 01:26:41 +01004976 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004977 if (cell_space_ == NULL) return false;
4978 if (!cell_space_->Setup(NULL, 0)) return false;
4979
4980 // The large object space may contain code or data. We set the memory
4981 // to be non-executable here for safety, but this means we need to enable it
4982 // explicitly when allocating large code objects.
Steve Block44f0eee2011-05-26 01:26:41 +01004983 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004984 if (lo_space_ == NULL) return false;
4985 if (!lo_space_->Setup()) return false;
4986
4987 if (create_heap_objects) {
4988 // Create initial maps.
4989 if (!CreateInitialMaps()) return false;
4990 if (!CreateApiObjects()) return false;
4991
4992 // Create initial objects
4993 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004994
4995 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004996 }
4997
Steve Block44f0eee2011-05-26 01:26:41 +01004998 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
4999 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00005000
Steve Block3ce2e202009-11-05 08:53:23 +00005001#ifdef ENABLE_LOGGING_AND_PROFILING
5002 // This should be called only after initial objects have been created.
Steve Block44f0eee2011-05-26 01:26:41 +01005003 isolate_->producer_heap_profile()->Setup();
Steve Block3ce2e202009-11-05 08:53:23 +00005004#endif
5005
Steve Blocka7e24c12009-10-30 11:49:00 +00005006 return true;
5007}
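// The sequence above, condensed (sketch; arguments and error paths elided):
//
//   if (!configured_) ConfigureHeapDefault();   // flags -> sizes
//   memory_allocator()->Setup(...);             // reserve address space
//   new_space_.Setup(...);                      // paired, aligned semispaces
//   // ... Setup() each paged space and the large object space ...
//   if (create_heap_objects) {
//     CreateInitialMaps(); CreateApiObjects(); CreateInitialObjects();
//   }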
5008
5009
Steve Blockd0582a62009-12-15 09:54:21 +00005010void Heap::SetStackLimits() {
Steve Block44f0eee2011-05-26 01:26:41 +01005011 ASSERT(isolate_ != NULL);
5012 ASSERT(isolate_ == isolate());
Steve Blocka7e24c12009-10-30 11:49:00 +00005013 // On 64-bit machines, pointers are generally out of range of Smis. We write
 5014 // something that looks like an out-of-range Smi to the GC.
5015
Steve Blockd0582a62009-12-15 09:54:21 +00005016 // Set up the special root array entries containing the stack limits.
5017 // These are actually addresses, but the tag makes the GC ignore them.
Steve Blocka7e24c12009-10-30 11:49:00 +00005018 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00005019 reinterpret_cast<Object*>(
Steve Block44f0eee2011-05-26 01:26:41 +01005020 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blockd0582a62009-12-15 09:54:21 +00005021 roots_[kRealStackLimitRootIndex] =
5022 reinterpret_cast<Object*>(
Steve Block44f0eee2011-05-26 01:26:41 +01005023 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00005024}
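// Worked example, assuming the usual smi encoding (kSmiTag == 0,
// kSmiTagMask == 1): a jslimit whose low bit is already clear is stored
// unchanged by (limit & ~kSmiTagMask) | kSmiTag, but because its low bit is
// zero the GC reads the root as a smi and never follows it as a pointer.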
5025
5026
5027void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01005028 if (FLAG_print_cumulative_gc_stat) {
5029 PrintF("\n\n");
5030 PrintF("gc_count=%d ", gc_count_);
5031 PrintF("mark_sweep_count=%d ", ms_count_);
5032 PrintF("mark_compact_count=%d ", mc_count_);
Steve Block44f0eee2011-05-26 01:26:41 +01005033 PrintF("max_gc_pause=%d ", get_max_gc_pause());
5034 PrintF("min_in_mutator=%d ", get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01005035 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
Steve Block44f0eee2011-05-26 01:26:41 +01005036 get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01005037 PrintF("\n\n");
5038 }
5039
Steve Block44f0eee2011-05-26 01:26:41 +01005040 isolate_->global_handles()->TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00005041
Steve Block44f0eee2011-05-26 01:26:41 +01005042 external_string_table_.TearDown();
Leon Clarkee46be812010-01-19 14:06:41 +00005043
Steve Blocka7e24c12009-10-30 11:49:00 +00005044 new_space_.TearDown();
5045
5046 if (old_pointer_space_ != NULL) {
5047 old_pointer_space_->TearDown();
5048 delete old_pointer_space_;
5049 old_pointer_space_ = NULL;
5050 }
5051
5052 if (old_data_space_ != NULL) {
5053 old_data_space_->TearDown();
5054 delete old_data_space_;
5055 old_data_space_ = NULL;
5056 }
5057
5058 if (code_space_ != NULL) {
5059 code_space_->TearDown();
5060 delete code_space_;
5061 code_space_ = NULL;
5062 }
5063
5064 if (map_space_ != NULL) {
5065 map_space_->TearDown();
5066 delete map_space_;
5067 map_space_ = NULL;
5068 }
5069
5070 if (cell_space_ != NULL) {
5071 cell_space_->TearDown();
5072 delete cell_space_;
5073 cell_space_ = NULL;
5074 }
5075
5076 if (lo_space_ != NULL) {
5077 lo_space_->TearDown();
5078 delete lo_space_;
5079 lo_space_ = NULL;
5080 }
5081
Steve Block44f0eee2011-05-26 01:26:41 +01005082 isolate_->memory_allocator()->TearDown();
5083
5084#ifdef DEBUG
5085 delete debug_utils_;
5086 debug_utils_ = NULL;
5087#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005088}
5089
5090
5091void Heap::Shrink() {
5092 // Try to shrink all paged spaces.
5093 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005094 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
5095 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00005096}
5097
5098
5099#ifdef ENABLE_HEAP_PROTECTION
5100
5101void Heap::Protect() {
5102 if (HasBeenSetup()) {
5103 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005104 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5105 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00005106 }
5107}
5108
5109
5110void Heap::Unprotect() {
5111 if (HasBeenSetup()) {
5112 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005113 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5114 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00005115 }
5116}
5117
5118#endif
5119
5120
Steve Block6ded16b2010-05-10 14:33:55 +01005121void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
5122 ASSERT(callback != NULL);
5123 GCPrologueCallbackPair pair(callback, gc_type);
5124 ASSERT(!gc_prologue_callbacks_.Contains(pair));
5125 return gc_prologue_callbacks_.Add(pair);
5126}
5127
5128
5129void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
5130 ASSERT(callback != NULL);
5131 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5132 if (gc_prologue_callbacks_[i].callback == callback) {
5133 gc_prologue_callbacks_.Remove(i);
5134 return;
5135 }
5136 }
5137 UNREACHABLE();
5138}
5139
5140
5141void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
5142 ASSERT(callback != NULL);
5143 GCEpilogueCallbackPair pair(callback, gc_type);
5144 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
5145 return gc_epilogue_callbacks_.Add(pair);
5146}
5147
5148
5149void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
5150 ASSERT(callback != NULL);
5151 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5152 if (gc_epilogue_callbacks_[i].callback == callback) {
5153 gc_epilogue_callbacks_.Remove(i);
5154 return;
5155 }
5156 }
5157 UNREACHABLE();
5158}
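// Usage sketch, assuming the public embedder API of this era forwards here
// (v8::V8::AddGCPrologueCallback and friends); gc_type selects which
// collection kinds invoke the callback:
//
//   static void OnMarkSweep(GCType type, GCCallbackFlags flags) {
//     // runs only around mark-sweep/mark-compact collections
//   }
//   V8::AddGCPrologueCallback(OnMarkSweep, kGCTypeMarkSweepCompact);
//   ...
//   V8::RemoveGCPrologueCallback(OnMarkSweep);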
5159
5160
Steve Blocka7e24c12009-10-30 11:49:00 +00005161#ifdef DEBUG
5162
5163class PrintHandleVisitor: public ObjectVisitor {
5164 public:
5165 void VisitPointers(Object** start, Object** end) {
5166 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01005167 PrintF(" handle %p to %p\n",
5168 reinterpret_cast<void*>(p),
5169 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00005170 }
5171};
5172
5173void Heap::PrintHandles() {
5174 PrintF("Handles:\n");
5175 PrintHandleVisitor v;
Steve Block44f0eee2011-05-26 01:26:41 +01005176 isolate_->handle_scope_implementer()->Iterate(&v);
Steve Blocka7e24c12009-10-30 11:49:00 +00005177}
5178
5179#endif
5180
5181
5182Space* AllSpaces::next() {
5183 switch (counter_++) {
5184 case NEW_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005185 return HEAP->new_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005186 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005187 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005188 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005189 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005190 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005191 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005192 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005193 return HEAP->map_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005194 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005195 return HEAP->cell_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005196 case LO_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005197 return HEAP->lo_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005198 default:
5199 return NULL;
5200 }
5201}
5202
5203
5204PagedSpace* PagedSpaces::next() {
5205 switch (counter_++) {
5206 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005207 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005208 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005209 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005210 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005211 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005212 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005213 return HEAP->map_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005214 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005215 return HEAP->cell_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005216 default:
5217 return NULL;
5218 }
5219}
5220
5221
5223OldSpace* OldSpaces::next() {
5224 switch (counter_++) {
5225 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005226 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005227 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005228 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005229 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005230 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005231 default:
5232 return NULL;
5233 }
5234}
5235
5236
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005237SpaceIterator::SpaceIterator()
5238 : current_space_(FIRST_SPACE),
5239 iterator_(NULL),
5240 size_func_(NULL) {
5241}
5242
5243
5244SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
5245 : current_space_(FIRST_SPACE),
5246 iterator_(NULL),
5247 size_func_(size_func) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005248}
5249
5250
5251SpaceIterator::~SpaceIterator() {
5252 // Delete active iterator if any.
5253 delete iterator_;
5254}
5255
5256
5257bool SpaceIterator::has_next() {
5258 // Iterate until no more spaces.
5259 return current_space_ != LAST_SPACE;
5260}
5261
5262
5263ObjectIterator* SpaceIterator::next() {
5264 if (iterator_ != NULL) {
5265 delete iterator_;
5266 iterator_ = NULL;
5267 // Move to the next space
5268 current_space_++;
5269 if (current_space_ > LAST_SPACE) {
5270 return NULL;
5271 }
5272 }
5273
5274 // Return iterator for the new current space.
5275 return CreateIterator();
5276}
5277
5278
5279// Create an iterator for the space to iterate.
5280ObjectIterator* SpaceIterator::CreateIterator() {
5281 ASSERT(iterator_ == NULL);
5282
5283 switch (current_space_) {
5284 case NEW_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005285 iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005286 break;
5287 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005288 iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005289 break;
5290 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005291 iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005292 break;
5293 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005294 iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005295 break;
5296 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005297 iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005298 break;
5299 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005300 iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005301 break;
5302 case LO_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005303 iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005304 break;
5305 }
5306
5307 // Return the newly allocated iterator.
5308 ASSERT(iterator_ != NULL);
5309 return iterator_;
5310}
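// The intended traversal protocol, as used by HeapIterator below (sketch):
//
//   SpaceIterator spaces;
//   while (spaces.has_next()) {
//     ObjectIterator* it = spaces.next();  // owned by the SpaceIterator
//     for (HeapObject* obj = it->next_object();
//          obj != NULL;
//          obj = it->next_object()) {
//       // ... visit obj ...
//     }
//   }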
5311
5312
Ben Murdochb0fe1622011-05-05 13:52:32 +01005313class HeapObjectsFilter {
5314 public:
5315 virtual ~HeapObjectsFilter() {}
5316 virtual bool SkipObject(HeapObject* object) = 0;
5317};
5318
5319
5320class FreeListNodesFilter : public HeapObjectsFilter {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005321 public:
5322 FreeListNodesFilter() {
5323 MarkFreeListNodes();
5324 }
5325
Ben Murdochb0fe1622011-05-05 13:52:32 +01005326 bool SkipObject(HeapObject* object) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005327 if (object->IsMarked()) {
5328 object->ClearMark();
5329 return true;
5330 } else {
5331 return false;
5332 }
5333 }
5334
5335 private:
5336 void MarkFreeListNodes() {
Steve Block44f0eee2011-05-26 01:26:41 +01005337 Heap* heap = HEAP;
5338 heap->old_pointer_space()->MarkFreeListNodes();
5339 heap->old_data_space()->MarkFreeListNodes();
5340 MarkCodeSpaceFreeListNodes(heap);
5341 heap->map_space()->MarkFreeListNodes();
5342 heap->cell_space()->MarkFreeListNodes();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005343 }
5344
Steve Block44f0eee2011-05-26 01:26:41 +01005345 void MarkCodeSpaceFreeListNodes(Heap* heap) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005346 // For code space, using FreeListNode::IsFreeListNode is OK.
Steve Block44f0eee2011-05-26 01:26:41 +01005347 HeapObjectIterator iter(heap->code_space());
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005348 for (HeapObject* obj = iter.next_object();
5349 obj != NULL;
5350 obj = iter.next_object()) {
5351 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
5352 }
5353 }
5354
5355 AssertNoAllocation no_alloc;
5356};
5357
5358
Ben Murdochb0fe1622011-05-05 13:52:32 +01005359class UnreachableObjectsFilter : public HeapObjectsFilter {
5360 public:
5361 UnreachableObjectsFilter() {
5362 MarkUnreachableObjects();
5363 }
5364
5365 bool SkipObject(HeapObject* object) {
5366 if (object->IsMarked()) {
5367 object->ClearMark();
5368 return true;
5369 } else {
5370 return false;
5371 }
5372 }
5373
5374 private:
5375 class UnmarkingVisitor : public ObjectVisitor {
5376 public:
5377 UnmarkingVisitor() : list_(10) {}
5378
5379 void VisitPointers(Object** start, Object** end) {
5380 for (Object** p = start; p < end; p++) {
5381 if (!(*p)->IsHeapObject()) continue;
5382 HeapObject* obj = HeapObject::cast(*p);
5383 if (obj->IsMarked()) {
5384 obj->ClearMark();
5385 list_.Add(obj);
5386 }
5387 }
5388 }
5389
5390 bool can_process() { return !list_.is_empty(); }
5391
5392 void ProcessNext() {
5393 HeapObject* obj = list_.RemoveLast();
5394 obj->Iterate(this);
5395 }
5396
5397 private:
5398 List<HeapObject*> list_;
5399 };
5400
5401 void MarkUnreachableObjects() {
5402 HeapIterator iterator;
5403 for (HeapObject* obj = iterator.next();
5404 obj != NULL;
5405 obj = iterator.next()) {
5406 obj->SetMark();
5407 }
5408 UnmarkingVisitor visitor;
Steve Block44f0eee2011-05-26 01:26:41 +01005409 HEAP->IterateRoots(&visitor, VISIT_ALL);
Ben Murdochb0fe1622011-05-05 13:52:32 +01005410 while (visitor.can_process())
5411 visitor.ProcessNext();
5412 }
5413
5414 AssertNoAllocation no_alloc;
5415};
5416
5417
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005418HeapIterator::HeapIterator()
5419 : filtering_(HeapIterator::kNoFiltering),
5420 filter_(NULL) {
5421 Init();
5422}
5423
5424
Ben Murdochb0fe1622011-05-05 13:52:32 +01005425HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005426 : filtering_(filtering),
5427 filter_(NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005428 Init();
5429}
5430
5431
5432HeapIterator::~HeapIterator() {
5433 Shutdown();
5434}
5435
5436
5437void HeapIterator::Init() {
5438 // Start the iteration.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005439 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5440 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
5441 switch (filtering_) {
5442 case kFilterFreeListNodes:
5443 filter_ = new FreeListNodesFilter;
5444 break;
5445 case kFilterUnreachable:
5446 filter_ = new UnreachableObjectsFilter;
5447 break;
5448 default:
5449 break;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005450 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005451 object_iterator_ = space_iterator_->next();
5452}
5453
5454
5455void HeapIterator::Shutdown() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005456#ifdef DEBUG
Ben Murdochb0fe1622011-05-05 13:52:32 +01005457 // Assert that in filtering mode we have iterated through all
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005458 // objects. Otherwise, the heap will be left in an inconsistent state.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005459 if (filtering_ != kNoFiltering) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005460 ASSERT(object_iterator_ == NULL);
5461 }
5462#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005463 // Make sure the last iterator is deallocated.
5464 delete space_iterator_;
5465 space_iterator_ = NULL;
5466 object_iterator_ = NULL;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005467 delete filter_;
5468 filter_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005469}
5470
5471
Leon Clarked91b9f72010-01-27 17:25:45 +00005472HeapObject* HeapIterator::next() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005473 if (filter_ == NULL) return NextObject();
5474
5475 HeapObject* obj = NextObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01005476 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005477 return obj;
5478}
5479
5480
5481HeapObject* HeapIterator::NextObject() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005482 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00005483 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005484
Leon Clarked91b9f72010-01-27 17:25:45 +00005485 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005486 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00005487 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005488 } else {
5489 // Go through the spaces looking for one that has objects.
5490 while (space_iterator_->has_next()) {
5491 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00005492 if (HeapObject* obj = object_iterator_->next_object()) {
5493 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005494 }
5495 }
5496 }
5497 // Done with the last space.
5498 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00005499 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005500}
5501
5502
5503void HeapIterator::reset() {
5504 // Restart the iterator.
5505 Shutdown();
5506 Init();
5507}
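// Typical usage, mirroring RecordStats above (sketch). A filtering iterator
// must be run to completion so that the marks it sets are cleared again;
// see the assert in Shutdown():
//
//   HeapIterator iterator(HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // obj is reachable from the roots
//   }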
5508
5509
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005510#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
Steve Blocka7e24c12009-10-30 11:49:00 +00005511
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005512Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00005513
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005514class PathTracer::MarkVisitor: public ObjectVisitor {
Steve Blocka7e24c12009-10-30 11:49:00 +00005515 public:
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005516 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00005517 void VisitPointers(Object** start, Object** end) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005518 // Scan all HeapObject pointers in [start, end)
5519 for (Object** p = start; !tracer_->found() && (p < end); p++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005520 if ((*p)->IsHeapObject())
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005521 tracer_->MarkRecursively(p, this);
Steve Blocka7e24c12009-10-30 11:49:00 +00005522 }
5523 }
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005524
5525 private:
5526 PathTracer* tracer_;
Steve Blocka7e24c12009-10-30 11:49:00 +00005527};
5528
Steve Blocka7e24c12009-10-30 11:49:00 +00005529
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005530class PathTracer::UnmarkVisitor: public ObjectVisitor {
5531 public:
5532 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5533 void VisitPointers(Object** start, Object** end) {
5534 // Scan all HeapObject pointers in [start, end)
5535 for (Object** p = start; p < end; p++) {
5536 if ((*p)->IsHeapObject())
5537 tracer_->UnmarkRecursively(p, this);
5538 }
5539 }
5540
5541 private:
5542 PathTracer* tracer_;
5543};
5544
5545
5546void PathTracer::VisitPointers(Object** start, Object** end) {
5547 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5548 // Visit all HeapObject pointers in [start, end)
5549 for (Object** p = start; !done && (p < end); p++) {
5550 if ((*p)->IsHeapObject()) {
5551 TracePathFrom(p);
5552 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5553 }
5554 }
5555}
5556
5557
5558void PathTracer::Reset() {
5559 found_target_ = false;
5560 object_stack_.Clear();
5561}
5562
5563
5564void PathTracer::TracePathFrom(Object** root) {
5565 ASSERT((search_target_ == kAnyGlobalObject) ||
5566 search_target_->IsHeapObject());
5567 found_target_in_trace_ = false;
5568 object_stack_.Clear();
5569
5570 MarkVisitor mark_visitor(this);
5571 MarkRecursively(root, &mark_visitor);
5572
5573 UnmarkVisitor unmark_visitor(this);
5574 UnmarkRecursively(root, &unmark_visitor);
5575
5576 ProcessResults();
5577}
5578
5579
5580void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005581 if (!(*p)->IsHeapObject()) return;
5582
5583 HeapObject* obj = HeapObject::cast(*p);
5584
5585 Object* map = obj->map();
5586
5587 if (!map->IsHeapObject()) return; // visited before
5588
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005589 if (found_target_in_trace_) return; // stop if target found
5590 object_stack_.Add(obj);
5591 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5592 (obj == search_target_)) {
5593 found_target_in_trace_ = true;
5594 found_target_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00005595 return;
5596 }
5597
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005598 bool is_global_context = obj->IsGlobalContext();
5599
Steve Blocka7e24c12009-10-30 11:49:00 +00005600 // not visited yet
5601 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5602
5603 Address map_addr = map_p->address();
5604
5605 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5606
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005607 // Scan the object body.
5608 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5609 // This is specialized to scan Contexts properly.
5610 Object** start = reinterpret_cast<Object**>(obj->address() +
5611 Context::kHeaderSize);
5612 Object** end = reinterpret_cast<Object**>(obj->address() +
5613 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
5614 mark_visitor->VisitPointers(start, end);
5615 } else {
5616 obj->IterateBody(map_p->instance_type(),
5617 obj->SizeFromMap(map_p),
5618 mark_visitor);
5619 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005620
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005621 // Scan the map after the body because the body is a lot more interesting
5622 // when doing leak detection.
5623 MarkRecursively(&map, mark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005624
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005625 if (!found_target_in_trace_) // don't pop if found the target
5626 object_stack_.RemoveLast();
Steve Blocka7e24c12009-10-30 11:49:00 +00005627}
5628
5629
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005630void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005631 if (!(*p)->IsHeapObject()) return;
5632
5633 HeapObject* obj = HeapObject::cast(*p);
5634
5635 Object* map = obj->map();
5636
5637 if (map->IsHeapObject()) return; // unmarked already
5638
5639 Address map_addr = reinterpret_cast<Address>(map);
5640
5641 map_addr -= kMarkTag;
5642
5643 ASSERT_TAG_ALIGNED(map_addr);
5644
5645 HeapObject* map_p = HeapObject::FromAddress(map_addr);
5646
5647 obj->set_map(reinterpret_cast<Map*>(map_p));
5648
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005649 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005650
5651 obj->IterateBody(Map::cast(map_p)->instance_type(),
5652 obj->SizeFromMap(Map::cast(map_p)),
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005653 unmark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005654}
5655
5656
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005657void PathTracer::ProcessResults() {
5658 if (found_target_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005659 PrintF("=====================================\n");
5660 PrintF("==== Path to object ====\n");
5661 PrintF("=====================================\n\n");
5662
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005663 ASSERT(!object_stack_.is_empty());
5664 for (int i = 0; i < object_stack_.length(); i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005665 if (i > 0) PrintF("\n |\n |\n V\n\n");
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005666 Object* obj = object_stack_[i];
5667#ifdef OBJECT_PRINT
Steve Blocka7e24c12009-10-30 11:49:00 +00005668 obj->Print();
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005669#else
5670 obj->ShortPrint();
5671#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005672 }
5673 PrintF("=====================================\n");
5674 }
5675}
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005676#endif // DEBUG || LIVE_OBJECT_LIST
Steve Blocka7e24c12009-10-30 11:49:00 +00005677
5678
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005679#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00005680// Triggers a depth-first traversal of reachable objects from roots
5681// and finds a path to a specific heap object and prints it.
Leon Clarkee46be812010-01-19 14:06:41 +00005682void Heap::TracePathToObject(Object* target) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005683 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5684 IterateRoots(&tracer, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005685}
5686
5687
5688// Triggers a depth-first traversal of reachable objects from roots
5689// and finds a path to any global object and prints it. Useful for
5690// determining the source for leaks of global objects.
5691void Heap::TracePathToGlobal() {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005692 PathTracer tracer(PathTracer::kAnyGlobalObject,
5693 PathTracer::FIND_ALL,
5694 VISIT_ALL);
5695 IterateRoots(&tracer, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005696}
5697#endif
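// Both helpers are intended for ad-hoc leak hunting, e.g. from a debugger
// (sketch; 'suspect' is a placeholder for the object under investigation):
//
//   heap->TracePathToObject(suspect);  // prints root-to-object paths
//   heap->TracePathToGlobal();         // prints paths to any global object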
5698
5699
Ben Murdochf87a2032010-10-22 12:50:53 +01005700static intptr_t CountTotalHolesSize() {
5701 intptr_t holes_size = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +01005702 OldSpaces spaces;
5703 for (OldSpace* space = spaces.next();
5704 space != NULL;
5705 space = spaces.next()) {
5706 holes_size += space->Waste() + space->AvailableFree();
5707 }
5708 return holes_size;
5709}
5710
5711
Steve Block44f0eee2011-05-26 01:26:41 +01005712GCTracer::GCTracer(Heap* heap)
Steve Blocka7e24c12009-10-30 11:49:00 +00005713 : start_time_(0.0),
Leon Clarkef7060e22010-06-03 12:02:55 +01005714 start_size_(0),
Steve Blocka7e24c12009-10-30 11:49:00 +00005715 gc_count_(0),
5716 full_gc_count_(0),
5717 is_compacting_(false),
Leon Clarkef7060e22010-06-03 12:02:55 +01005718 marked_count_(0),
5719 allocated_since_last_gc_(0),
5720 spent_in_mutator_(0),
Steve Block44f0eee2011-05-26 01:26:41 +01005721 promoted_objects_size_(0),
5722 heap_(heap) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005723 // These two fields reflect the state of the previous full collection.
5724 // Set them before they are changed by the collector.
Steve Block44f0eee2011-05-26 01:26:41 +01005725 previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
5726 previous_marked_count_ =
5727 heap_->mark_compact_collector_.previous_marked_count();
Leon Clarkef7060e22010-06-03 12:02:55 +01005728 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00005729 start_time_ = OS::TimeCurrentMillis();
Steve Block44f0eee2011-05-26 01:26:41 +01005730 start_size_ = heap_->SizeOfObjects();
Leon Clarkef7060e22010-06-03 12:02:55 +01005731
5732 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
5733 scopes_[i] = 0;
5734 }
5735
5736 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
5737
Steve Block44f0eee2011-05-26 01:26:41 +01005738 allocated_since_last_gc_ =
5739 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
Leon Clarkef7060e22010-06-03 12:02:55 +01005740
Steve Block44f0eee2011-05-26 01:26:41 +01005741 if (heap_->last_gc_end_timestamp_ > 0) {
5742 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005743 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005744}
5745
5746
5747GCTracer::~GCTracer() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005748 // Printf ONE line iff flag is set.
Leon Clarkef7060e22010-06-03 12:02:55 +01005749 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5750
Steve Block44f0eee2011-05-26 01:26:41 +01005751 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005752
Steve Block44f0eee2011-05-26 01:26:41 +01005753 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
5754 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
Leon Clarkef7060e22010-06-03 12:02:55 +01005755
Steve Block44f0eee2011-05-26 01:26:41 +01005756 int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005757
5758 // Update cumulative GC statistics if required.
5759 if (FLAG_print_cumulative_gc_stat) {
Steve Block44f0eee2011-05-26 01:26:41 +01005760 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
5761 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
5762 heap_->alive_after_last_gc_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005763 if (!first_gc) {
Steve Block44f0eee2011-05-26 01:26:41 +01005764 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
5765 static_cast<int>(spent_in_mutator_));
Leon Clarkef7060e22010-06-03 12:02:55 +01005766 }
5767 }
5768
5769 if (!FLAG_trace_gc_nvp) {
5770 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
5771
5772 PrintF("%s %.1f -> %.1f MB, ",
5773 CollectorString(),
5774 static_cast<double>(start_size_) / MB,
5775 SizeOfHeapObjects());
5776
5777 if (external_time > 0) PrintF("%d / ", external_time);
5778 PrintF("%d ms.\n", time);
5779 } else {
5780 PrintF("pause=%d ", time);
5781 PrintF("mutator=%d ",
5782 static_cast<int>(spent_in_mutator_));
5783
5784 PrintF("gc=");
5785 switch (collector_) {
5786 case SCAVENGER:
5787 PrintF("s");
5788 break;
5789 case MARK_COMPACTOR:
Steve Block44f0eee2011-05-26 01:26:41 +01005790 PrintF("%s",
5791 heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
Leon Clarkef7060e22010-06-03 12:02:55 +01005792 break;
5793 default:
5794 UNREACHABLE();
5795 }
5796 PrintF(" ");
5797
5798 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
5799 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
5800 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
Iain Merrick75681382010-08-19 15:07:18 +01005801 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
Leon Clarkef7060e22010-06-03 12:02:55 +01005802 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
5803
Ben Murdochf87a2032010-10-22 12:50:53 +01005804 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
Steve Block44f0eee2011-05-26 01:26:41 +01005805 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
Ben Murdochf87a2032010-10-22 12:50:53 +01005806 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
5807 in_free_list_or_wasted_before_gc_);
5808 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
Leon Clarkef7060e22010-06-03 12:02:55 +01005809
Ben Murdochf87a2032010-10-22 12:50:53 +01005810 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
5811 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005812
5813 PrintF("\n");
5814 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005815
5816#if defined(ENABLE_LOGGING_AND_PROFILING)
Steve Block44f0eee2011-05-26 01:26:41 +01005817 heap_->PrintShortHeapStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00005818#endif
5819}
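// The two output shapes produced above, with hypothetical numbers. Default
// --trace-gc prints one line per collection:
//
//   Scavenge 12.3 -> 8.1 MB, 4 ms.
//
// while --trace-gc-nvp emits name=value pairs (a single line, wrapped here):
//
//   pause=4 mutator=120 gc=s external=0 mark=0 sweep=0 sweepns=0 compact=0
//   total_size_before=12897280 total_size_after=8493056 ...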
5820
5821
5822const char* GCTracer::CollectorString() {
5823 switch (collector_) {
5824 case SCAVENGER:
5825 return "Scavenge";
5826 case MARK_COMPACTOR:
Steve Block44f0eee2011-05-26 01:26:41 +01005827 return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
5828 : "Mark-sweep";
Steve Blocka7e24c12009-10-30 11:49:00 +00005829 }
5830 return "Unknown GC";
5831}
5832
5833
5834int KeyedLookupCache::Hash(Map* map, String* name) {
5835 // Uses only lower 32 bits if pointers are larger.
5836 uintptr_t addr_hash =
Leon Clarkee46be812010-01-19 14:06:41 +00005837 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
Andrei Popescu402d9372010-02-26 13:31:12 +00005838 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
Steve Blocka7e24c12009-10-30 11:49:00 +00005839}
5840
5841
5842int KeyedLookupCache::Lookup(Map* map, String* name) {
5843 int index = Hash(map, name);
5844 Key& key = keys_[index];
5845 if ((key.map == map) && key.name->Equals(name)) {
5846 return field_offsets_[index];
5847 }
Steve Block44f0eee2011-05-26 01:26:41 +01005848 return kNotFound;
Steve Blocka7e24c12009-10-30 11:49:00 +00005849}
5850
5851
5852void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
5853 String* symbol;
Steve Block44f0eee2011-05-26 01:26:41 +01005854 if (HEAP->LookupSymbolIfExists(name, &symbol)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005855 int index = Hash(map, symbol);
5856 Key& key = keys_[index];
5857 key.map = map;
5858 key.name = symbol;
5859 field_offsets_[index] = field_offset;
5860 }
5861}
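// The cache protocol implied by Lookup/Update (sketch): only existing
// symbols are remembered, so a later hit can rely on symbol identity.
// LookupSlow is a hypothetical placeholder for the generic property lookup:
//
//   int offset = cache->Lookup(map, name);
//   if (offset == KeyedLookupCache::kNotFound) {
//     offset = LookupSlow(map, name);
//     cache->Update(map, name, offset);  // cached only if 'name' is a symbol
//   }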
5862
5863
5864void KeyedLookupCache::Clear() {
5865 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5866}
5867
5868
Steve Blocka7e24c12009-10-30 11:49:00 +00005869void DescriptorLookupCache::Clear() {
5870 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
5871}
5872
5873
Steve Blocka7e24c12009-10-30 11:49:00 +00005874#ifdef DEBUG
Ben Murdochf87a2032010-10-22 12:50:53 +01005875void Heap::GarbageCollectionGreedyCheck() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005876 ASSERT(FLAG_gc_greedy);
Steve Block44f0eee2011-05-26 01:26:41 +01005877 if (isolate_->bootstrapper()->IsActive()) return;
Ben Murdochf87a2032010-10-22 12:50:53 +01005878 if (disallow_allocation_failure()) return;
5879 CollectGarbage(NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005880}
5881#endif
5882
5883
Steve Block44f0eee2011-05-26 01:26:41 +01005884TranscendentalCache::SubCache::SubCache(Type t)
5885 : type_(t),
5886 isolate_(Isolate::Current()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005887 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
5888 uint32_t in1 = 0xffffffffu; // generated by the FPU.
5889 for (int i = 0; i < kCacheSize; i++) {
5890 elements_[i].in[0] = in0;
5891 elements_[i].in[1] = in1;
5892 elements_[i].output = NULL;
5893 }
5894}
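// Why the all-ones pattern works as an "empty" marker: each entry caches the
// two 32-bit halves of a double input, and 0xffffffff/0xffffffff is a NaN
// bit pattern the FPU never produces, so no real input collides with an
// uninitialized slot.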
5895
5896
Steve Blocka7e24c12009-10-30 11:49:00 +00005897void TranscendentalCache::Clear() {
5898 for (int i = 0; i < kNumberOfCaches; i++) {
5899 if (caches_[i] != NULL) {
5900 delete caches_[i];
5901 caches_[i] = NULL;
5902 }
5903 }
5904}
5905
5906
Leon Clarkee46be812010-01-19 14:06:41 +00005907void ExternalStringTable::CleanUp() {
5908 int last = 0;
5909 for (int i = 0; i < new_space_strings_.length(); ++i) {
Steve Block44f0eee2011-05-26 01:26:41 +01005910 if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5911 if (heap_->InNewSpace(new_space_strings_[i])) {
Leon Clarkee46be812010-01-19 14:06:41 +00005912 new_space_strings_[last++] = new_space_strings_[i];
5913 } else {
5914 old_space_strings_.Add(new_space_strings_[i]);
5915 }
5916 }
5917 new_space_strings_.Rewind(last);
5918 last = 0;
5919 for (int i = 0; i < old_space_strings_.length(); ++i) {
Steve Block44f0eee2011-05-26 01:26:41 +01005920 if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5921 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
Leon Clarkee46be812010-01-19 14:06:41 +00005922 old_space_strings_[last++] = old_space_strings_[i];
5923 }
5924 old_space_strings_.Rewind(last);
5925 Verify();
5926}
5927
5928
5929void ExternalStringTable::TearDown() {
5930 new_space_strings_.Free();
5931 old_space_strings_.Free();
5932}
5933
5934
Steve Blocka7e24c12009-10-30 11:49:00 +00005935} } // namespace v8::internal