// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


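// Floors for the old generation promotion and allocation limits; the actual
// limits are recomputed from these after each mark-compact collection (see
// Heap::PerformGarbageCollection below).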
static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;


static Mutex* gc_initializer_mutex = OS::CreateMutex();


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
      reserved_semispace_size_(2*MB),
      max_semispace_size_(2*MB),
      initial_semispace_size_(128*KB),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
      reserved_semispace_size_(16*MB),
      max_semispace_size_(16*MB),
      initial_semispace_size_(1*MB),
      max_old_generation_size_(1*GB),
      max_executable_size_(256*MB),
      code_range_size_(512*MB),
#else
      reserved_semispace_size_(8*MB),
      max_semispace_size_(8*MB),
      initial_semispace_size_(512*KB),
      max_old_generation_size_(512*MB),
      max_executable_size_(128*MB),
      code_range_size_(0),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      mc_count_(0),
      ms_count_(0),
      gc_count_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      configured_(false),
      is_safe_to_read_maps_(true) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


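// GC-safe size computation for old-space objects.  The first variant works
// on ordinary map words (clearing any mark and overflow bits first); the
// second is used while map pointers are encoded during compaction and also
// understands the single- and multi-word free-space encodings left by the
// collector.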
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif

  LiveObjectList::GCPrologue();
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  mark_compact_collector_.SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector()->SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC.  Therefore, if we collect aggressively and a weak handle
  // callback has been invoked, we rerun major GC to release objects which
  // have become garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  mark_compact_collector()->SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this);
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


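// Reserves the requested number of bytes in each space, collecting garbage
// in any space whose reservation cannot yet be satisfied and retrying until
// all reservations succeed.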
void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


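// Makes sure the from space of the new space is committed.  If committing
// fails, page lists are relinked in chunk order and the heap is shrunk to
// release memory before retrying; if it still fails, V8 aborts.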
void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


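// Clears the JSFunction result caches of every context on the global
// contexts list (skipped while the bootstrapper is active).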
void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

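// Recomputes the young generation survival rate as the percentage of the
// new space (measured at the start of the GC) that survived the last
// collection, and classifies the trend as INCREASING, DECREASING or STABLE
// depending on how far the rate moved since the previous GC.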
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
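    // Recompute the old generation limits: each is the current promoted size
    // plus a slack of at least the corresponding minimum, growing
    // proportionally (by one third and one half, respectively) with the old
    // generation.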
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that the mutator is either building or
      // modifying a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  bool is_compacting = mark_compact_collector_.IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  is_safe_to_read_maps_ = false;
  mark_compact_collector_.CollectGarbage();
  is_safe_to_read_maps_ = true;

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(HEAP->old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  SwitchScavengingVisitorsTableIfProfilingWasEnabled();

  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue_.Initialize(new_space_.ToSpaceHigh());

  is_safe_to_read_maps_ = false;
  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &Heap::IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();
  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  is_safe_to_read_maps_ = true;

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


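// Rebuilds the new-space part of the external string table after a scavenge.
// For each entry the updater either reports the string dead (the entry is
// dropped), still in new space (the entry is updated in place), or promoted
// (the string is moved to the old strings list).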
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.Verify();

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


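// Walks the weak list of optimized JSFunctions hanging off a context,
// keeping only the functions the retainer wants to keep, and returns the new
// head of the list.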
static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = heap->undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != heap->undefined_value()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == heap->undefined_value()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(heap->undefined_value());
  }

  return head;
}


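// Prunes the weak list of global contexts: contexts the retainer does not
// keep are unlinked, and for every retained context the weak list of its
// optimized functions is pruned as well (see ProcessFunctionWeakReferences
// above).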
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (candidate != undefined_value()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined_value()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  global_contexts_list_ = head;
}


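// Static visitor used while draining the to-space queue in DoScavenge:
// scavenges every new-space pointer found in the bodies of already copied
// objects.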
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue_.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue_.remove(&target, &size);

      // Promoted object might be already partially visited
      // during dirty regions iteration. Thus we search specifically
      // for pointers to from semispace instead of looking for pointers
      // to new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}


enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


typedef void (*ScavengingCallback)(Map* map,
                                   HeapObject** slot,
                                   HeapObject* object);


1248static Atomic32 scavenging_visitors_table_mode_;
1249static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
1250
1251
1252INLINE(static void DoScavengeObject(Map* map,
1253 HeapObject** slot,
1254 HeapObject* obj));
1255
1256
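// Dispatch the scavenge of |obj| to the callback registered for its map in the
// global scavenging dispatch table.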
1257void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
1258 scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
1259}
1260
1261
1262template<LoggingAndProfiling logging_and_profiling_mode>
Iain Merrick75681382010-08-19 15:07:18 +01001263class ScavengingVisitor : public StaticVisitorBase {
1264 public:
1265 static void Initialize() {
1266 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1267 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1268 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1269 table_.Register(kVisitByteArray, &EvacuateByteArray);
1270 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
Ben Murdoch8b112d22011-06-08 16:22:53 +01001271
Ben Murdochf87a2032010-10-22 12:50:53 +01001272 table_.Register(kVisitGlobalContext,
1273 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001274 template VisitSpecialized<Context::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001275
1276 table_.Register(kVisitConsString,
1277 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001278 template VisitSpecialized<ConsString::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001279
1280 table_.Register(kVisitSharedFunctionInfo,
1281 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001282 template VisitSpecialized<SharedFunctionInfo::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001283
1284 table_.Register(kVisitJSFunction,
1285 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001286 template VisitSpecialized<JSFunction::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001287
1288 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1289 kVisitDataObject,
1290 kVisitDataObjectGeneric>();
1291
1292 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1293 kVisitJSObject,
1294 kVisitJSObjectGeneric>();
1295
1296 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1297 kVisitStruct,
1298 kVisitStructGeneric>();
1299 }
1300
Ben Murdoch8b112d22011-06-08 16:22:53 +01001301 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1302 return &table_;
Iain Merrick75681382010-08-19 15:07:18 +01001303 }
1304
Iain Merrick75681382010-08-19 15:07:18 +01001305 private:
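  // DATA_OBJECT: the object holds no pointers that need further scavenging;
  // POINTER_OBJECT: after promotion the object is queued for pointer iteration.
  // SMALL: the object is known to fit in a page; UNKNOWN_SIZE: it may need a
  // large-object-space allocation.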
1306 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1307 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1308
Steve Blocka7e24c12009-10-30 11:49:00 +00001309#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Steve Block44f0eee2011-05-26 01:26:41 +01001310 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
Iain Merrick75681382010-08-19 15:07:18 +01001311 bool should_record = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001312#ifdef DEBUG
Iain Merrick75681382010-08-19 15:07:18 +01001313 should_record = FLAG_heap_stats;
Steve Blocka7e24c12009-10-30 11:49:00 +00001314#endif
1315#ifdef ENABLE_LOGGING_AND_PROFILING
Iain Merrick75681382010-08-19 15:07:18 +01001316 should_record = should_record || FLAG_log_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00001317#endif
Iain Merrick75681382010-08-19 15:07:18 +01001318 if (should_record) {
Steve Block44f0eee2011-05-26 01:26:41 +01001319 if (heap->new_space()->Contains(obj)) {
1320 heap->new_space()->RecordAllocation(obj);
Iain Merrick75681382010-08-19 15:07:18 +01001321 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001322 heap->new_space()->RecordPromotion(obj);
Iain Merrick75681382010-08-19 15:07:18 +01001323 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001324 }
1325 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001326#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1327
Iain Merrick75681382010-08-19 15:07:18 +01001328 // Helper function used by CopyObject to copy a source object to an
1329 // allocated target object and update the forwarding pointer in the source
1330 // object. Returns the target object.
Steve Block44f0eee2011-05-26 01:26:41 +01001331 INLINE(static HeapObject* MigrateObject(Heap* heap,
1332 HeapObject* source,
Iain Merrick75681382010-08-19 15:07:18 +01001333 HeapObject* target,
1334 int size)) {
1335 // Copy the content of source to target.
Steve Block44f0eee2011-05-26 01:26:41 +01001336 heap->CopyBlock(target->address(), source->address(), size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001337
Iain Merrick75681382010-08-19 15:07:18 +01001338 // Set the forwarding address.
1339 source->set_map_word(MapWord::FromForwardingAddress(target));
Steve Blocka7e24c12009-10-30 11:49:00 +00001340
Ben Murdoch8b112d22011-06-08 16:22:53 +01001341 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001342#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Ben Murdoch8b112d22011-06-08 16:22:53 +01001343 // Update NewSpace stats if necessary.
1344 RecordCopiedObject(heap, target);
Steve Blocka7e24c12009-10-30 11:49:00 +00001345#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001346 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001347#if defined(ENABLE_LOGGING_AND_PROFILING)
Ben Murdoch8b112d22011-06-08 16:22:53 +01001348 Isolate* isolate = heap->isolate();
1349 if (isolate->logger()->is_logging() ||
1350 isolate->cpu_profiler()->is_profiling()) {
1351 if (target->IsSharedFunctionInfo()) {
1352 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1353 source->address(), target->address()));
1354 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001355 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001356#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001357 }
1358
Iain Merrick75681382010-08-19 15:07:18 +01001359 return target;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001360 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001361
1362
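  // Try to promote the object into old space first; if that allocation fails,
  // fall back to copying it within new space.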
Iain Merrick75681382010-08-19 15:07:18 +01001363 template<ObjectContents object_contents, SizeRestriction size_restriction>
1364 static inline void EvacuateObject(Map* map,
1365 HeapObject** slot,
1366 HeapObject* object,
1367 int object_size) {
1368 ASSERT((size_restriction != SMALL) ||
1369 (object_size <= Page::kMaxHeapObjectSize));
1370 ASSERT(object->Size() == object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001371
Steve Block44f0eee2011-05-26 01:26:41 +01001372 Heap* heap = map->heap();
1373 if (heap->ShouldBePromoted(object->address(), object_size)) {
John Reck59135872010-11-02 12:39:01 -07001374 MaybeObject* maybe_result;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001375
Iain Merrick75681382010-08-19 15:07:18 +01001376 if ((size_restriction != SMALL) &&
1377 (object_size > Page::kMaxHeapObjectSize)) {
Steve Block44f0eee2011-05-26 01:26:41 +01001378 maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001379 } else {
Iain Merrick75681382010-08-19 15:07:18 +01001380 if (object_contents == DATA_OBJECT) {
Steve Block44f0eee2011-05-26 01:26:41 +01001381 maybe_result = heap->old_data_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001382 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001383 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001384 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001385 }
1386
John Reck59135872010-11-02 12:39:01 -07001387 Object* result = NULL; // Initialization to please compiler.
1388 if (maybe_result->ToObject(&result)) {
Iain Merrick75681382010-08-19 15:07:18 +01001389 HeapObject* target = HeapObject::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01001390 *slot = MigrateObject(heap, object, target, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001391
Iain Merrick75681382010-08-19 15:07:18 +01001392 if (object_contents == POINTER_OBJECT) {
Steve Block44f0eee2011-05-26 01:26:41 +01001393 heap->promotion_queue()->insert(target, object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001394 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001395
Steve Block44f0eee2011-05-26 01:26:41 +01001396 heap->tracer()->increment_promoted_objects_size(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001397 return;
1398 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001399 }
John Reck59135872010-11-02 12:39:01 -07001400 Object* result =
Steve Block44f0eee2011-05-26 01:26:41 +01001401 heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
1402 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001403 return;
1404 }
1405
Iain Merrick75681382010-08-19 15:07:18 +01001406
1407 static inline void EvacuateFixedArray(Map* map,
1408 HeapObject** slot,
1409 HeapObject* object) {
1410 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1411 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1412 slot,
1413 object,
1414 object_size);
1415 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001416
1417
Iain Merrick75681382010-08-19 15:07:18 +01001418 static inline void EvacuateByteArray(Map* map,
1419 HeapObject** slot,
1420 HeapObject* object) {
1421 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1422 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1423 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001424
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001425
Iain Merrick75681382010-08-19 15:07:18 +01001426 static inline void EvacuateSeqAsciiString(Map* map,
1427 HeapObject** slot,
1428 HeapObject* object) {
1429 int object_size = SeqAsciiString::cast(object)->
1430 SeqAsciiStringSize(map->instance_type());
1431 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1432 }
1433
1434
1435 static inline void EvacuateSeqTwoByteString(Map* map,
1436 HeapObject** slot,
1437 HeapObject* object) {
1438 int object_size = SeqTwoByteString::cast(object)->
1439 SeqTwoByteStringSize(map->instance_type());
1440 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1441 }
1442
1443
1444 static inline bool IsShortcutCandidate(int type) {
1445 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1446 }
1447
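  // A cons string whose second component is the empty string is a shortcut
  // candidate: the slot is redirected straight to the first component instead
  // of evacuating the wrapper.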
1448 static inline void EvacuateShortcutCandidate(Map* map,
1449 HeapObject** slot,
1450 HeapObject* object) {
1451 ASSERT(IsShortcutCandidate(map->instance_type()));
1452
Steve Block44f0eee2011-05-26 01:26:41 +01001453 if (ConsString::cast(object)->unchecked_second() ==
1454 map->heap()->empty_string()) {
Iain Merrick75681382010-08-19 15:07:18 +01001455 HeapObject* first =
1456 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1457
1458 *slot = first;
1459
Steve Block44f0eee2011-05-26 01:26:41 +01001460 if (!map->heap()->InNewSpace(first)) {
Iain Merrick75681382010-08-19 15:07:18 +01001461 object->set_map_word(MapWord::FromForwardingAddress(first));
1462 return;
1463 }
1464
1465 MapWord first_word = first->map_word();
1466 if (first_word.IsForwardingAddress()) {
1467 HeapObject* target = first_word.ToForwardingAddress();
1468
1469 *slot = target;
1470 object->set_map_word(MapWord::FromForwardingAddress(target));
1471 return;
1472 }
1473
Ben Murdoch8b112d22011-06-08 16:22:53 +01001474 DoScavengeObject(first->map(), slot, first);
Iain Merrick75681382010-08-19 15:07:18 +01001475 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1476 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001477 }
Iain Merrick75681382010-08-19 15:07:18 +01001478
1479 int object_size = ConsString::kSize;
1480 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001481 }
1482
Iain Merrick75681382010-08-19 15:07:18 +01001483 template<ObjectContents object_contents>
1484 class ObjectEvacuationStrategy {
1485 public:
1486 template<int object_size>
1487 static inline void VisitSpecialized(Map* map,
1488 HeapObject** slot,
1489 HeapObject* object) {
1490 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1491 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001492
Iain Merrick75681382010-08-19 15:07:18 +01001493 static inline void Visit(Map* map,
1494 HeapObject** slot,
1495 HeapObject* object) {
1496 int object_size = map->instance_size();
1497 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1498 }
1499 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001500
Ben Murdoch8b112d22011-06-08 16:22:53 +01001501 static VisitorDispatchTable<ScavengingCallback> table_;
Iain Merrick75681382010-08-19 15:07:18 +01001502};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001503
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001504
Ben Murdoch8b112d22011-06-08 16:22:53 +01001505template<LoggingAndProfiling logging_and_profiling_mode>
1506VisitorDispatchTable<ScavengingCallback>
1507 ScavengingVisitor<logging_and_profiling_mode>::table_;
1508
1509
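// Populate both specialized dispatch tables and start with the non-logging
// variant; the logging variant is swapped in lazily once profiling is enabled.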
1510static void InitializeScavengingVisitorsTables() {
1511 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
1512 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
1513 scavenging_visitors_table_.CopyFrom(
1514 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
1515 scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
1516}
1517
1518
1519void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
Ben Murdochbea578b2011-06-08 20:04:28 +01001520#ifdef ENABLE_LOGGING_AND_PROFILING
Ben Murdoch8b112d22011-06-08 16:22:53 +01001521 if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
1522 // Table was already updated by some isolate.
1523 return;
1524 }
1525
1526 if (isolate()->logger()->is_logging() ||
1527 isolate()->cpu_profiler()->is_profiling() ||
1528 (isolate()->heap_profiler() != NULL &&
1529 isolate()->heap_profiler()->is_profiling())) {
1530 // If one of the isolates is doing a scavenge at this moment, it
1531 // might see this table in an inconsistent state when
1532 // some of the callbacks point to
1533 // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
1534 // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
1535 // However, this does not lead to any bugs because such an isolate does
1536 // not have profiling enabled, and any isolate with profiling enabled is
1537 // guaranteed to see the table in a consistent state.
1538 scavenging_visitors_table_.CopyFrom(
1539 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
1540
1541 // We use Release_Store to prevent reordering of this write before writes
1542 // to the table.
1543 Release_Store(&scavenging_visitors_table_mode_,
1544 LOGGING_AND_PROFILING_ENABLED);
1545 }
Ben Murdochbea578b2011-06-08 20:04:28 +01001546#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001547}
Steve Blocka7e24c12009-10-30 11:49:00 +00001548
1549
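// Slow path of Heap::ScavengeObject: the object has no forwarding address yet,
// so evacuate it via the per-map scavenging callback.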
1550void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
Steve Block44f0eee2011-05-26 01:26:41 +01001551 ASSERT(HEAP->InFromSpace(object));
Steve Blocka7e24c12009-10-30 11:49:00 +00001552 MapWord first_word = object->map_word();
1553 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001554 Map* map = first_word.ToMap();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001555 DoScavengeObject(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001556}
1557
1558
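// Allocates a map with only the fields needed during bootstrapping initialized;
// prototype, constructor, descriptors and code cache are fixed up later in
// CreateInitialMaps().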
John Reck59135872010-11-02 12:39:01 -07001559MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1560 int instance_size) {
1561 Object* result;
1562 { MaybeObject* maybe_result = AllocateRawMap();
1563 if (!maybe_result->ToObject(&result)) return maybe_result;
1564 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001565
1566 // Map::cast cannot be used due to uninitialized map field.
1567 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1568 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1569 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Steve Block44f0eee2011-05-26 01:26:41 +01001570 reinterpret_cast<Map*>(result)->set_visitor_id(
1571 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001572 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001573 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001574 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001575 reinterpret_cast<Map*>(result)->set_bit_field(0);
1576 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001577 return result;
1578}
1579
1580
John Reck59135872010-11-02 12:39:01 -07001581MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1582 Object* result;
1583 { MaybeObject* maybe_result = AllocateRawMap();
1584 if (!maybe_result->ToObject(&result)) return maybe_result;
1585 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001586
1587 Map* map = reinterpret_cast<Map*>(result);
1588 map->set_map(meta_map());
1589 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001590 map->set_visitor_id(
1591 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001592 map->set_prototype(null_value());
1593 map->set_constructor(null_value());
1594 map->set_instance_size(instance_size);
1595 map->set_inobject_properties(0);
1596 map->set_pre_allocated_property_fields(0);
1597 map->set_instance_descriptors(empty_descriptor_array());
1598 map->set_code_cache(empty_fixed_array());
1599 map->set_unused_property_fields(0);
1600 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001601 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001602
1603 // If the map object is aligned, fill the padding area with Smi 0 objects.
1604 if (Map::kPadStart < Map::kSize) {
1605 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1606 0,
1607 Map::kSize - Map::kPadStart);
1608 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001609 return map;
1610}
1611
1612
John Reck59135872010-11-02 12:39:01 -07001613MaybeObject* Heap::AllocateCodeCache() {
1614 Object* result;
1615 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1616 if (!maybe_result->ToObject(&result)) return maybe_result;
1617 }
Steve Block6ded16b2010-05-10 14:33:55 +01001618 CodeCache* code_cache = CodeCache::cast(result);
1619 code_cache->set_default_cache(empty_fixed_array());
1620 code_cache->set_normal_type_cache(undefined_value());
1621 return code_cache;
1622}
1623
1624
Steve Blocka7e24c12009-10-30 11:49:00 +00001625const Heap::StringTypeTable Heap::string_type_table[] = {
1626#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1627 {type, size, k##camel_name##MapRootIndex},
1628 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1629#undef STRING_TYPE_ELEMENT
1630};
1631
1632
1633const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1634#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1635 {contents, k##name##RootIndex},
1636 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1637#undef CONSTANT_SYMBOL_ELEMENT
1638};
1639
1640
1641const Heap::StructTable Heap::struct_table[] = {
1642#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1643 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1644 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1645#undef STRUCT_TABLE_ELEMENT
1646};
1647
1648
1649bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001650 Object* obj;
1651 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1652 if (!maybe_obj->ToObject(&obj)) return false;
1653 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001654 // Map::cast cannot be used due to uninitialized map field.
1655 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1656 set_meta_map(new_meta_map);
1657 new_meta_map->set_map(new_meta_map);
1658
John Reck59135872010-11-02 12:39:01 -07001659 { MaybeObject* maybe_obj =
1660 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1661 if (!maybe_obj->ToObject(&obj)) return false;
1662 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001663 set_fixed_array_map(Map::cast(obj));
1664
John Reck59135872010-11-02 12:39:01 -07001665 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1666 if (!maybe_obj->ToObject(&obj)) return false;
1667 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001668 set_oddball_map(Map::cast(obj));
1669
Steve Block6ded16b2010-05-10 14:33:55 +01001670 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001671 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1672 if (!maybe_obj->ToObject(&obj)) return false;
1673 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001674 set_empty_fixed_array(FixedArray::cast(obj));
1675
John Reck59135872010-11-02 12:39:01 -07001676 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1677 if (!maybe_obj->ToObject(&obj)) return false;
1678 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001679 set_null_value(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01001680 Oddball::cast(obj)->set_kind(Oddball::kNull);
Steve Blocka7e24c12009-10-30 11:49:00 +00001681
1682 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001683 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1684 if (!maybe_obj->ToObject(&obj)) return false;
1685 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001686 set_empty_descriptor_array(DescriptorArray::cast(obj));
1687
1688 // Fix the instance_descriptors for the existing maps.
1689 meta_map()->set_instance_descriptors(empty_descriptor_array());
1690 meta_map()->set_code_cache(empty_fixed_array());
1691
1692 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1693 fixed_array_map()->set_code_cache(empty_fixed_array());
1694
1695 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1696 oddball_map()->set_code_cache(empty_fixed_array());
1697
1698 // Fix prototype object for existing maps.
1699 meta_map()->set_prototype(null_value());
1700 meta_map()->set_constructor(null_value());
1701
1702 fixed_array_map()->set_prototype(null_value());
1703 fixed_array_map()->set_constructor(null_value());
1704
1705 oddball_map()->set_prototype(null_value());
1706 oddball_map()->set_constructor(null_value());
1707
John Reck59135872010-11-02 12:39:01 -07001708 { MaybeObject* maybe_obj =
1709 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1710 if (!maybe_obj->ToObject(&obj)) return false;
1711 }
Iain Merrick75681382010-08-19 15:07:18 +01001712 set_fixed_cow_array_map(Map::cast(obj));
1713 ASSERT(fixed_array_map() != fixed_cow_array_map());
1714
John Reck59135872010-11-02 12:39:01 -07001715 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1716 if (!maybe_obj->ToObject(&obj)) return false;
1717 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001718 set_heap_number_map(Map::cast(obj));
1719
John Reck59135872010-11-02 12:39:01 -07001720 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1721 if (!maybe_obj->ToObject(&obj)) return false;
1722 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001723 set_proxy_map(Map::cast(obj));
1724
1725 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1726 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001727 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1728 if (!maybe_obj->ToObject(&obj)) return false;
1729 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001730 roots_[entry.index] = Map::cast(obj);
1731 }
1732
John Reck59135872010-11-02 12:39:01 -07001733 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1734 if (!maybe_obj->ToObject(&obj)) return false;
1735 }
Steve Blockd0582a62009-12-15 09:54:21 +00001736 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001737 Map::cast(obj)->set_is_undetectable();
1738
John Reck59135872010-11-02 12:39:01 -07001739 { MaybeObject* maybe_obj =
1740 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1741 if (!maybe_obj->ToObject(&obj)) return false;
1742 }
Steve Blockd0582a62009-12-15 09:54:21 +00001743 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001744 Map::cast(obj)->set_is_undetectable();
1745
John Reck59135872010-11-02 12:39:01 -07001746 { MaybeObject* maybe_obj =
1747 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1748 if (!maybe_obj->ToObject(&obj)) return false;
1749 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001750 set_byte_array_map(Map::cast(obj));
1751
Ben Murdochb0fe1622011-05-05 13:52:32 +01001752 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1753 if (!maybe_obj->ToObject(&obj)) return false;
1754 }
1755 set_empty_byte_array(ByteArray::cast(obj));
1756
John Reck59135872010-11-02 12:39:01 -07001757 { MaybeObject* maybe_obj =
Steve Block44f0eee2011-05-26 01:26:41 +01001758 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
John Reck59135872010-11-02 12:39:01 -07001759 if (!maybe_obj->ToObject(&obj)) return false;
1760 }
Steve Block44f0eee2011-05-26 01:26:41 +01001761 set_external_pixel_array_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001762
John Reck59135872010-11-02 12:39:01 -07001763 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1764 ExternalArray::kAlignedSize);
1765 if (!maybe_obj->ToObject(&obj)) return false;
1766 }
Steve Block3ce2e202009-11-05 08:53:23 +00001767 set_external_byte_array_map(Map::cast(obj));
1768
John Reck59135872010-11-02 12:39:01 -07001769 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1770 ExternalArray::kAlignedSize);
1771 if (!maybe_obj->ToObject(&obj)) return false;
1772 }
Steve Block3ce2e202009-11-05 08:53:23 +00001773 set_external_unsigned_byte_array_map(Map::cast(obj));
1774
John Reck59135872010-11-02 12:39:01 -07001775 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1776 ExternalArray::kAlignedSize);
1777 if (!maybe_obj->ToObject(&obj)) return false;
1778 }
Steve Block3ce2e202009-11-05 08:53:23 +00001779 set_external_short_array_map(Map::cast(obj));
1780
John Reck59135872010-11-02 12:39:01 -07001781 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1782 ExternalArray::kAlignedSize);
1783 if (!maybe_obj->ToObject(&obj)) return false;
1784 }
Steve Block3ce2e202009-11-05 08:53:23 +00001785 set_external_unsigned_short_array_map(Map::cast(obj));
1786
John Reck59135872010-11-02 12:39:01 -07001787 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1788 ExternalArray::kAlignedSize);
1789 if (!maybe_obj->ToObject(&obj)) return false;
1790 }
Steve Block3ce2e202009-11-05 08:53:23 +00001791 set_external_int_array_map(Map::cast(obj));
1792
John Reck59135872010-11-02 12:39:01 -07001793 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1794 ExternalArray::kAlignedSize);
1795 if (!maybe_obj->ToObject(&obj)) return false;
1796 }
Steve Block3ce2e202009-11-05 08:53:23 +00001797 set_external_unsigned_int_array_map(Map::cast(obj));
1798
John Reck59135872010-11-02 12:39:01 -07001799 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1800 ExternalArray::kAlignedSize);
1801 if (!maybe_obj->ToObject(&obj)) return false;
1802 }
Steve Block3ce2e202009-11-05 08:53:23 +00001803 set_external_float_array_map(Map::cast(obj));
1804
John Reck59135872010-11-02 12:39:01 -07001805 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1806 if (!maybe_obj->ToObject(&obj)) return false;
1807 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001808 set_code_map(Map::cast(obj));
1809
John Reck59135872010-11-02 12:39:01 -07001810 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1811 JSGlobalPropertyCell::kSize);
1812 if (!maybe_obj->ToObject(&obj)) return false;
1813 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001814 set_global_property_cell_map(Map::cast(obj));
1815
John Reck59135872010-11-02 12:39:01 -07001816 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1817 if (!maybe_obj->ToObject(&obj)) return false;
1818 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001819 set_one_pointer_filler_map(Map::cast(obj));
1820
John Reck59135872010-11-02 12:39:01 -07001821 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1822 if (!maybe_obj->ToObject(&obj)) return false;
1823 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001824 set_two_pointer_filler_map(Map::cast(obj));
1825
1826 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1827 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001828 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1829 if (!maybe_obj->ToObject(&obj)) return false;
1830 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001831 roots_[entry.index] = Map::cast(obj);
1832 }
1833
John Reck59135872010-11-02 12:39:01 -07001834 { MaybeObject* maybe_obj =
1835 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1836 if (!maybe_obj->ToObject(&obj)) return false;
1837 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001838 set_hash_table_map(Map::cast(obj));
1839
John Reck59135872010-11-02 12:39:01 -07001840 { MaybeObject* maybe_obj =
1841 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1842 if (!maybe_obj->ToObject(&obj)) return false;
1843 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001844 set_context_map(Map::cast(obj));
1845
John Reck59135872010-11-02 12:39:01 -07001846 { MaybeObject* maybe_obj =
1847 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1848 if (!maybe_obj->ToObject(&obj)) return false;
1849 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001850 set_catch_context_map(Map::cast(obj));
1851
John Reck59135872010-11-02 12:39:01 -07001852 { MaybeObject* maybe_obj =
1853 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1854 if (!maybe_obj->ToObject(&obj)) return false;
1855 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001856 Map* global_context_map = Map::cast(obj);
1857 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1858 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001859
John Reck59135872010-11-02 12:39:01 -07001860 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1861 SharedFunctionInfo::kAlignedSize);
1862 if (!maybe_obj->ToObject(&obj)) return false;
1863 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001864 set_shared_function_info_map(Map::cast(obj));
1865
Steve Block1e0659c2011-05-24 12:43:12 +01001866 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
1867 JSMessageObject::kSize);
1868 if (!maybe_obj->ToObject(&obj)) return false;
1869 }
1870 set_message_object_map(Map::cast(obj));
1871
Steve Block44f0eee2011-05-26 01:26:41 +01001872 ASSERT(!InNewSpace(empty_fixed_array()));
Steve Blocka7e24c12009-10-30 11:49:00 +00001873 return true;
1874}
1875
1876
John Reck59135872010-11-02 12:39:01 -07001877MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001878 // Statically ensure that it is safe to allocate heap numbers in paged
1879 // spaces.
1880 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1881 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1882
John Reck59135872010-11-02 12:39:01 -07001883 Object* result;
1884 { MaybeObject* maybe_result =
1885 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1886 if (!maybe_result->ToObject(&result)) return maybe_result;
1887 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001888
1889 HeapObject::cast(result)->set_map(heap_number_map());
1890 HeapNumber::cast(result)->set_value(value);
1891 return result;
1892}
1893
1894
John Reck59135872010-11-02 12:39:01 -07001895MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001896 // Use general version, if we're forced to always allocate.
1897 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1898
1899 // This version of AllocateHeapNumber is optimized for
1900 // allocation in new space.
1901 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1902 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001903 Object* result;
1904 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1905 if (!maybe_result->ToObject(&result)) return maybe_result;
1906 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001907 HeapObject::cast(result)->set_map(heap_number_map());
1908 HeapNumber::cast(result)->set_value(value);
1909 return result;
1910}
1911
1912
John Reck59135872010-11-02 12:39:01 -07001913MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1914 Object* result;
1915 { MaybeObject* maybe_result = AllocateRawCell();
1916 if (!maybe_result->ToObject(&result)) return maybe_result;
1917 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001918 HeapObject::cast(result)->set_map(global_property_cell_map());
1919 JSGlobalPropertyCell::cast(result)->set_value(value);
1920 return result;
1921}
1922
1923
John Reck59135872010-11-02 12:39:01 -07001924MaybeObject* Heap::CreateOddball(const char* to_string,
Steve Block44f0eee2011-05-26 01:26:41 +01001925 Object* to_number,
1926 byte kind) {
John Reck59135872010-11-02 12:39:01 -07001927 Object* result;
1928 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1929 if (!maybe_result->ToObject(&result)) return maybe_result;
1930 }
Steve Block44f0eee2011-05-26 01:26:41 +01001931 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
Steve Blocka7e24c12009-10-30 11:49:00 +00001932}
1933
1934
1935bool Heap::CreateApiObjects() {
1936 Object* obj;
1937
John Reck59135872010-11-02 12:39:01 -07001938 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1939 if (!maybe_obj->ToObject(&obj)) return false;
1940 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001941 set_neander_map(Map::cast(obj));
1942
Steve Block44f0eee2011-05-26 01:26:41 +01001943 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
John Reck59135872010-11-02 12:39:01 -07001944 if (!maybe_obj->ToObject(&obj)) return false;
1945 }
1946 Object* elements;
1947 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1948 if (!maybe_elements->ToObject(&elements)) return false;
1949 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001950 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1951 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1952 set_message_listeners(JSObject::cast(obj));
1953
1954 return true;
1955}
1956
1957
Steve Blocka7e24c12009-10-30 11:49:00 +00001958void Heap::CreateJSEntryStub() {
1959 JSEntryStub stub;
1960 set_js_entry_code(*stub.GetCode());
1961}
1962
1963
1964void Heap::CreateJSConstructEntryStub() {
1965 JSConstructEntryStub stub;
1966 set_js_construct_entry_code(*stub.GetCode());
1967}
1968
1969
1970void Heap::CreateFixedStubs() {
1971 // Here we create roots for fixed stubs. They are needed at GC
1972 // for cooking and uncooking (check out frames.cc).
1973 // This eliminates the need for doing a dictionary lookup in the
1974 // stub cache for these stubs.
1975 HandleScope scope;
1976 // gcc-4.4 has a problem generating correct code for the following snippet:
Steve Block44f0eee2011-05-26 01:26:41 +01001977 // { JSEntryStub stub;
1978 // js_entry_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001979 // }
Steve Block44f0eee2011-05-26 01:26:41 +01001980 // { JSConstructEntryStub stub;
1981 // js_construct_entry_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001982 // }
1983 // To work around the problem, make separate functions without inlining.
Steve Blocka7e24c12009-10-30 11:49:00 +00001984 Heap::CreateJSEntryStub();
1985 Heap::CreateJSConstructEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001986}
1987
1988
1989bool Heap::CreateInitialObjects() {
1990 Object* obj;
1991
1992 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001993 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1994 if (!maybe_obj->ToObject(&obj)) return false;
1995 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001996 set_minus_zero_value(obj);
1997 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1998
John Reck59135872010-11-02 12:39:01 -07001999 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2000 if (!maybe_obj->ToObject(&obj)) return false;
2001 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002002 set_nan_value(obj);
2003
John Reck59135872010-11-02 12:39:01 -07002004 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
2005 if (!maybe_obj->ToObject(&obj)) return false;
2006 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002007 set_undefined_value(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01002008 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
Steve Blocka7e24c12009-10-30 11:49:00 +00002009 ASSERT(!InNewSpace(undefined_value()));
2010
2011 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07002012 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2013 if (!maybe_obj->ToObject(&obj)) return false;
2014 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002015 // Don't use set_symbol_table() due to asserts.
2016 roots_[kSymbolTableRootIndex] = obj;
2017
2018 // Assign the print strings for oddballs after creating the symbol table.
John Reck59135872010-11-02 12:39:01 -07002019 Object* symbol;
2020 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
2021 if (!maybe_symbol->ToObject(&symbol)) return false;
2022 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002023 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
2024 Oddball::cast(undefined_value())->set_to_number(nan_value());
2025
Steve Blocka7e24c12009-10-30 11:49:00 +00002026 // Allocate the null_value
John Reck59135872010-11-02 12:39:01 -07002027 { MaybeObject* maybe_obj =
Steve Block44f0eee2011-05-26 01:26:41 +01002028 Oddball::cast(null_value())->Initialize("null",
2029 Smi::FromInt(0),
2030 Oddball::kNull);
John Reck59135872010-11-02 12:39:01 -07002031 if (!maybe_obj->ToObject(&obj)) return false;
2032 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002033
Steve Block44f0eee2011-05-26 01:26:41 +01002034 { MaybeObject* maybe_obj = CreateOddball("true",
2035 Smi::FromInt(1),
2036 Oddball::kTrue);
John Reck59135872010-11-02 12:39:01 -07002037 if (!maybe_obj->ToObject(&obj)) return false;
2038 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002039 set_true_value(obj);
2040
Steve Block44f0eee2011-05-26 01:26:41 +01002041 { MaybeObject* maybe_obj = CreateOddball("false",
2042 Smi::FromInt(0),
2043 Oddball::kFalse);
John Reck59135872010-11-02 12:39:01 -07002044 if (!maybe_obj->ToObject(&obj)) return false;
2045 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002046 set_false_value(obj);
2047
Steve Block44f0eee2011-05-26 01:26:41 +01002048 { MaybeObject* maybe_obj = CreateOddball("hole",
2049 Smi::FromInt(-1),
2050 Oddball::kTheHole);
John Reck59135872010-11-02 12:39:01 -07002051 if (!maybe_obj->ToObject(&obj)) return false;
2052 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002053 set_the_hole_value(obj);
2054
Ben Murdoch086aeea2011-05-13 15:57:08 +01002055 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Steve Block44f0eee2011-05-26 01:26:41 +01002056 Smi::FromInt(-4),
2057 Oddball::kArgumentMarker);
Ben Murdoch086aeea2011-05-13 15:57:08 +01002058 if (!maybe_obj->ToObject(&obj)) return false;
2059 }
2060 set_arguments_marker(obj);
2061
Steve Block44f0eee2011-05-26 01:26:41 +01002062 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2063 Smi::FromInt(-2),
2064 Oddball::kOther);
John Reck59135872010-11-02 12:39:01 -07002065 if (!maybe_obj->ToObject(&obj)) return false;
2066 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002067 set_no_interceptor_result_sentinel(obj);
2068
Steve Block44f0eee2011-05-26 01:26:41 +01002069 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2070 Smi::FromInt(-3),
2071 Oddball::kOther);
John Reck59135872010-11-02 12:39:01 -07002072 if (!maybe_obj->ToObject(&obj)) return false;
2073 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002074 set_termination_exception(obj);
2075
2076 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07002077 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2078 if (!maybe_obj->ToObject(&obj)) return false;
2079 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002080 set_empty_string(String::cast(obj));
2081
2082 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07002083 { MaybeObject* maybe_obj =
2084 LookupAsciiSymbol(constant_symbol_table[i].contents);
2085 if (!maybe_obj->ToObject(&obj)) return false;
2086 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002087 roots_[constant_symbol_table[i].index] = String::cast(obj);
2088 }
2089
2090 // Allocate the hidden symbol which is used to identify the hidden properties
2091 // in JSObjects. The hash code has a special value so that it will not match
2092 // the empty string when searching for the property. It cannot be part of the
2093 // loop above because it needs to be allocated manually with the special
2094 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2095 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07002096 { MaybeObject* maybe_obj =
2097 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2098 if (!maybe_obj->ToObject(&obj)) return false;
2099 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002100 hidden_symbol_ = String::cast(obj);
2101
2102 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07002103 { MaybeObject* maybe_obj =
2104 AllocateProxy((Address) &Accessors::ObjectPrototype);
2105 if (!maybe_obj->ToObject(&obj)) return false;
2106 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002107 set_prototype_accessors(Proxy::cast(obj));
2108
2109 // Allocate the code_stubs dictionary. The initial size is set to avoid
2110 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002111 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2112 if (!maybe_obj->ToObject(&obj)) return false;
2113 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002114 set_code_stubs(NumberDictionary::cast(obj));
2115
2116 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2117 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002118 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2119 if (!maybe_obj->ToObject(&obj)) return false;
2120 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002121 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2122
Kristian Monsen25f61362010-05-21 11:50:48 +01002123 set_instanceof_cache_function(Smi::FromInt(0));
2124 set_instanceof_cache_map(Smi::FromInt(0));
2125 set_instanceof_cache_answer(Smi::FromInt(0));
2126
Steve Blocka7e24c12009-10-30 11:49:00 +00002127 CreateFixedStubs();
2128
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002129 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002130 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2131 if (!maybe_obj->ToObject(&obj)) return false;
2132 }
Steve Block44f0eee2011-05-26 01:26:41 +01002133 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2134 obj);
John Reck59135872010-11-02 12:39:01 -07002135 if (!maybe_obj->ToObject(&obj)) return false;
2136 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002137 set_intrinsic_function_names(StringDictionary::cast(obj));
2138
Leon Clarkee46be812010-01-19 14:06:41 +00002139 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002140
Steve Block6ded16b2010-05-10 14:33:55 +01002141 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002142 { MaybeObject* maybe_obj =
2143 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2144 if (!maybe_obj->ToObject(&obj)) return false;
2145 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002146 set_single_character_string_cache(FixedArray::cast(obj));
2147
2148 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002149 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2150 if (!maybe_obj->ToObject(&obj)) return false;
2151 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002152 set_natives_source_cache(FixedArray::cast(obj));
2153
Steve Block44f0eee2011-05-26 01:26:41 +01002154 // Handling of script id generation is in FACTORY->NewScript.
Steve Blocka7e24c12009-10-30 11:49:00 +00002155 set_last_script_id(undefined_value());
2156
2157 // Initialize keyed lookup cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002158 isolate_->keyed_lookup_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002159
2160 // Initialize context slot cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002161 isolate_->context_slot_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002162
2163 // Initialize descriptor cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002164 isolate_->descriptor_lookup_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002165
2166 // Initialize compilation cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002167 isolate_->compilation_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002168
2169 return true;
2170}
2171
2172
John Reck59135872010-11-02 12:39:01 -07002173MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002174 // Compute the size of the number string cache based on the max heap size.
2175 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2176 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2177 int number_string_cache_size = max_semispace_size_ / 512;
2178 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
John Reck59135872010-11-02 12:39:01 -07002179 Object* obj;
2180 MaybeObject* maybe_obj =
2181 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2182 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2183 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002184}
2185
2186
2187void Heap::FlushNumberStringCache() {
2188 // Flush the number to string cache.
2189 int len = number_string_cache()->length();
2190 for (int i = 0; i < len; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01002191 number_string_cache()->set_undefined(this, i);
Leon Clarkee46be812010-01-19 14:06:41 +00002192 }
2193}
2194
2195
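// Hash a double by combining the two 32-bit halves of its bit pattern.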
Steve Blocka7e24c12009-10-30 11:49:00 +00002196static inline int double_get_hash(double d) {
2197 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002198 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002199}
2200
2201
2202static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002203 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002204}
2205
2206
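// The number-string cache is a flat FixedArray of (key, value) pairs: the key
// for a given hash lives at index hash * 2 and the cached string at
// index hash * 2 + 1.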
Steve Blocka7e24c12009-10-30 11:49:00 +00002207Object* Heap::GetNumberStringCache(Object* number) {
2208 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002209 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002210 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002211 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002212 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002213 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002214 }
2215 Object* key = number_string_cache()->get(hash * 2);
2216 if (key == number) {
2217 return String::cast(number_string_cache()->get(hash * 2 + 1));
2218 } else if (key->IsHeapNumber() &&
2219 number->IsHeapNumber() &&
2220 key->Number() == number->Number()) {
2221 return String::cast(number_string_cache()->get(hash * 2 + 1));
2222 }
2223 return undefined_value();
2224}
2225
2226
2227void Heap::SetNumberStringCache(Object* number, String* string) {
2228 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002229 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002230 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002231 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002232 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002233 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002234 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002235 number_string_cache()->set(hash * 2, number);
2236 }
2237 number_string_cache()->set(hash * 2 + 1, string);
2238}
2239
2240
John Reck59135872010-11-02 12:39:01 -07002241MaybeObject* Heap::NumberToString(Object* number,
2242 bool check_number_string_cache) {
Steve Block44f0eee2011-05-26 01:26:41 +01002243 isolate_->counters()->number_to_string_runtime()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002244 if (check_number_string_cache) {
2245 Object* cached = GetNumberStringCache(number);
2246 if (cached != undefined_value()) {
2247 return cached;
2248 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002249 }
2250
2251 char arr[100];
2252 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2253 const char* str;
2254 if (number->IsSmi()) {
2255 int num = Smi::cast(number)->value();
2256 str = IntToCString(num, buffer);
2257 } else {
2258 double num = HeapNumber::cast(number)->value();
2259 str = DoubleToCString(num, buffer);
2260 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002261
John Reck59135872010-11-02 12:39:01 -07002262 Object* js_string;
2263 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2264 if (maybe_js_string->ToObject(&js_string)) {
2265 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002266 }
John Reck59135872010-11-02 12:39:01 -07002267 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002268}
2269
2270
Steve Block3ce2e202009-11-05 08:53:23 +00002271Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2272 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2273}
2274
2275
2276Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2277 ExternalArrayType array_type) {
2278 switch (array_type) {
2279 case kExternalByteArray:
2280 return kExternalByteArrayMapRootIndex;
2281 case kExternalUnsignedByteArray:
2282 return kExternalUnsignedByteArrayMapRootIndex;
2283 case kExternalShortArray:
2284 return kExternalShortArrayMapRootIndex;
2285 case kExternalUnsignedShortArray:
2286 return kExternalUnsignedShortArrayMapRootIndex;
2287 case kExternalIntArray:
2288 return kExternalIntArrayMapRootIndex;
2289 case kExternalUnsignedIntArray:
2290 return kExternalUnsignedIntArrayMapRootIndex;
2291 case kExternalFloatArray:
2292 return kExternalFloatArrayMapRootIndex;
Steve Block44f0eee2011-05-26 01:26:41 +01002293 case kExternalPixelArray:
2294 return kExternalPixelArrayMapRootIndex;
Steve Block3ce2e202009-11-05 08:53:23 +00002295 default:
2296 UNREACHABLE();
2297 return kUndefinedValueRootIndex;
2298 }
2299}
2300
2301
John Reck59135872010-11-02 12:39:01 -07002302MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002303 // We need to distinguish the minus zero value and this cannot be
2304 // done after conversion to int. Doing this by comparing bit
2305 // patterns is faster than using fpclassify() et al.
2306 static const DoubleRepresentation minus_zero(-0.0);
2307
2308 DoubleRepresentation rep(value);
2309 if (rep.bits == minus_zero.bits) {
2310 return AllocateHeapNumber(-0.0, pretenure);
2311 }
2312
2313 int int_value = FastD2I(value);
2314 if (value == int_value && Smi::IsValid(int_value)) {
2315 return Smi::FromInt(int_value);
2316 }
2317
2318 // Materialize the value in the heap.
2319 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002320}
2321
2322
John Reck59135872010-11-02 12:39:01 -07002323MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002324 // Statically ensure that it is safe to allocate proxies in paged spaces.
2325 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2326 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002327 Object* result;
2328 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2329 if (!maybe_result->ToObject(&result)) return maybe_result;
2330 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002331
2332 Proxy::cast(result)->set_proxy(proxy);
2333 return result;
2334}
2335
2336
John Reck59135872010-11-02 12:39:01 -07002337MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2338 Object* result;
2339 { MaybeObject* maybe_result =
2340 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2341 if (!maybe_result->ToObject(&result)) return maybe_result;
2342 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002343
2344 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2345 share->set_name(name);
Steve Block44f0eee2011-05-26 01:26:41 +01002346 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
Steve Blocka7e24c12009-10-30 11:49:00 +00002347 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002348 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Block44f0eee2011-05-26 01:26:41 +01002349 Code* construct_stub = isolate_->builtins()->builtin(
2350 Builtins::kJSConstructStubGeneric);
Steve Blocka7e24c12009-10-30 11:49:00 +00002351 share->set_construct_stub(construct_stub);
2352 share->set_expected_nof_properties(0);
2353 share->set_length(0);
2354 share->set_formal_parameter_count(0);
2355 share->set_instance_class_name(Object_symbol());
2356 share->set_function_data(undefined_value());
2357 share->set_script(undefined_value());
2358 share->set_start_position_and_type(0);
2359 share->set_debug_info(undefined_value());
2360 share->set_inferred_name(empty_string());
2361 share->set_compiler_hints(0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002362 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002363 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002364 share->set_this_property_assignments_count(0);
2365 share->set_this_property_assignments(undefined_value());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002366 share->set_opt_count(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002367 share->set_num_literals(0);
2368 share->set_end_position(0);
2369 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002370 return result;
2371}
2372
2373
Steve Block1e0659c2011-05-24 12:43:12 +01002374MaybeObject* Heap::AllocateJSMessageObject(String* type,
2375 JSArray* arguments,
2376 int start_position,
2377 int end_position,
2378 Object* script,
2379 Object* stack_trace,
2380 Object* stack_frames) {
2381 Object* result;
2382 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2383 if (!maybe_result->ToObject(&result)) return maybe_result;
2384 }
2385 JSMessageObject* message = JSMessageObject::cast(result);
2386 message->set_properties(Heap::empty_fixed_array());
2387 message->set_elements(Heap::empty_fixed_array());
2388 message->set_type(type);
2389 message->set_arguments(arguments);
2390 message->set_start_position(start_position);
2391 message->set_end_position(end_position);
2392 message->set_script(script);
2393 message->set_stack_trace(stack_trace);
2394 message->set_stack_frames(stack_frames);
2395 return result;
2396}
2397
2398
2399
Steve Blockd0582a62009-12-15 09:54:21 +00002400// Returns true for a character in a range. Both limits are inclusive.
2401static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2402  // This makes use of the unsigned wraparound.
2403 return character - from <= to - from;
2404}
2405
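// Standalone sketch of the wraparound trick used by Between() above; the
// helper name is hypothetical and not part of V8. With unsigned operands, a
// character below 'from' wraps around to a huge value when 'from' is
// subtracted, so a single comparison covers both limits:
//
//   static inline bool InRangeInclusive(uint32_t c, uint32_t from, uint32_t to) {
//     return c - from <= to - from;
//   }
//   // InRangeInclusive('5', '0', '9') -> true
//   // InRangeInclusive('/', '0', '9') -> false ('/' - '0' wraps around)
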
2406
John Reck59135872010-11-02 12:39:01 -07002407MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
Steve Block44f0eee2011-05-26 01:26:41 +01002408 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07002409 uint32_t c1,
2410 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002411 String* symbol;
2412 // Numeric strings have a different hash algorithm not known by
2413 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2414 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
Steve Block44f0eee2011-05-26 01:26:41 +01002415 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002416 return symbol;
2417 // Now we know the length is 2, we might as well make use of that fact
2418 // when building the new string.
2419 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2420 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002421 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002422 { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
John Reck59135872010-11-02 12:39:01 -07002423 if (!maybe_result->ToObject(&result)) return maybe_result;
2424 }
Steve Blockd0582a62009-12-15 09:54:21 +00002425 char* dest = SeqAsciiString::cast(result)->GetChars();
2426 dest[0] = c1;
2427 dest[1] = c2;
2428 return result;
2429 } else {
John Reck59135872010-11-02 12:39:01 -07002430 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002431 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
John Reck59135872010-11-02 12:39:01 -07002432 if (!maybe_result->ToObject(&result)) return maybe_result;
2433 }
Steve Blockd0582a62009-12-15 09:54:21 +00002434 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2435 dest[0] = c1;
2436 dest[1] = c2;
2437 return result;
2438 }
2439}
2440
2441
John Reck59135872010-11-02 12:39:01 -07002442MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002443 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002444 if (first_length == 0) {
2445 return second;
2446 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002447
2448 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002449 if (second_length == 0) {
2450 return first;
2451 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002452
2453 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002454
2455 // Optimization for 2-byte strings often used as keys in a decompression
2456 // dictionary. Check whether we already have the string in the symbol
2457  // table to prevent creation of many unnecessary strings.
2458 if (length == 2) {
2459 unsigned c1 = first->Get(0);
2460 unsigned c2 = second->Get(0);
Steve Block44f0eee2011-05-26 01:26:41 +01002461 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blockd0582a62009-12-15 09:54:21 +00002462 }
2463
Steve Block6ded16b2010-05-10 14:33:55 +01002464 bool first_is_ascii = first->IsAsciiRepresentation();
2465 bool second_is_ascii = second->IsAsciiRepresentation();
2466 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002467
2468 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002469 // of the new cons string is too large.
2470 if (length > String::kMaxLength || length < 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01002471 isolate()->context()->mark_out_of_memory();
Steve Blocka7e24c12009-10-30 11:49:00 +00002472 return Failure::OutOfMemoryException();
2473 }
2474
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002475 bool is_ascii_data_in_two_byte_string = false;
2476 if (!is_ascii) {
2477 // At least one of the strings uses two-byte representation so we
2478 // can't use the fast case code for short ascii strings below, but
2479 // we can try to save memory if all chars actually fit in ascii.
2480 is_ascii_data_in_two_byte_string =
2481 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2482 if (is_ascii_data_in_two_byte_string) {
Steve Block44f0eee2011-05-26 01:26:41 +01002483 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002484 }
2485 }
2486
Steve Blocka7e24c12009-10-30 11:49:00 +00002487  // If the resulting string is small, make a flat string.
2488 if (length < String::kMinNonFlatLength) {
2489 ASSERT(first->IsFlat());
2490 ASSERT(second->IsFlat());
2491 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002492 Object* result;
2493 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2494 if (!maybe_result->ToObject(&result)) return maybe_result;
2495 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002496 // Copy the characters into the new object.
2497 char* dest = SeqAsciiString::cast(result)->GetChars();
2498 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002499 const char* src;
2500 if (first->IsExternalString()) {
2501 src = ExternalAsciiString::cast(first)->resource()->data();
2502 } else {
2503 src = SeqAsciiString::cast(first)->GetChars();
2504 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002505 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2506 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002507 if (second->IsExternalString()) {
2508 src = ExternalAsciiString::cast(second)->resource()->data();
2509 } else {
2510 src = SeqAsciiString::cast(second)->GetChars();
2511 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002512 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2513 return result;
2514 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002515 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002516 Object* result;
2517 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2518 if (!maybe_result->ToObject(&result)) return maybe_result;
2519 }
Steve Block6ded16b2010-05-10 14:33:55 +01002520 // Copy the characters into the new object.
2521 char* dest = SeqAsciiString::cast(result)->GetChars();
2522 String::WriteToFlat(first, dest, 0, first_length);
2523 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block44f0eee2011-05-26 01:26:41 +01002524 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002525 return result;
2526 }
2527
John Reck59135872010-11-02 12:39:01 -07002528 Object* result;
2529 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2530 if (!maybe_result->ToObject(&result)) return maybe_result;
2531 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002532 // Copy the characters into the new object.
2533 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2534 String::WriteToFlat(first, dest, 0, first_length);
2535 String::WriteToFlat(second, dest + first_length, 0, second_length);
2536 return result;
2537 }
2538 }
2539
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002540 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2541 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002542
John Reck59135872010-11-02 12:39:01 -07002543 Object* result;
2544 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2545 if (!maybe_result->ToObject(&result)) return maybe_result;
2546 }
Leon Clarke4515c472010-02-03 11:58:03 +00002547
2548 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002549 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002550 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002551 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002552 cons_string->set_hash_field(String::kEmptyHashField);
2553 cons_string->set_first(first, mode);
2554 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002555 return result;
2556}
2557
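// Summary of the decision ladder implemented by AllocateConsString() above
// (descriptive only; the names are the ones used in this file):
//   1. either operand is empty            -> return the other string as-is.
//   2. combined length == 2               -> try the symbol table via
//                                            MakeOrFindTwoCharacterString().
//   3. length < String::kMinNonFlatLength -> copy both halves into a flat
//                                            sequential string.
//   4. otherwise                          -> allocate a ConsString that only
//                                            stores pointers to both halves;
//                                            copying is deferred until the
//                                            string is flattened.
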
2558
John Reck59135872010-11-02 12:39:01 -07002559MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002560 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002561 int end,
2562 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002563 int length = end - start;
2564
2565 if (length == 1) {
Steve Block44f0eee2011-05-26 01:26:41 +01002566 return LookupSingleCharacterStringFromCode(buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002567 } else if (length == 2) {
2568 // Optimization for 2-byte strings often used as keys in a decompression
2569 // dictionary. Check whether we already have the string in the symbol
2570    // table to prevent creation of many unnecessary strings.
2571 unsigned c1 = buffer->Get(start);
2572 unsigned c2 = buffer->Get(start + 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002573 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002574 }
2575
2576 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002577 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002578
John Reck59135872010-11-02 12:39:01 -07002579 Object* result;
2580 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2581                   ? AllocateRawAsciiString(length, pretenure)
2582 : AllocateRawTwoByteString(length, pretenure);
2583 if (!maybe_result->ToObject(&result)) return maybe_result;
2584 }
Steve Blockd0582a62009-12-15 09:54:21 +00002585 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002586 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002587 if (buffer->IsAsciiRepresentation()) {
2588 ASSERT(string_result->IsAsciiRepresentation());
2589 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2590 String::WriteToFlat(buffer, dest, start, end);
2591 } else {
2592 ASSERT(string_result->IsTwoByteRepresentation());
2593 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2594 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002595 }
Steve Blockd0582a62009-12-15 09:54:21 +00002596
Steve Blocka7e24c12009-10-30 11:49:00 +00002597 return result;
2598}
2599
2600
John Reck59135872010-11-02 12:39:01 -07002601MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002602 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002603 size_t length = resource->length();
2604 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002605 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002606 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002607 }
2608
Steve Blockd0582a62009-12-15 09:54:21 +00002609 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002610 Object* result;
2611 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2612 if (!maybe_result->ToObject(&result)) return maybe_result;
2613 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002614
2615 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002616 external_string->set_length(static_cast<int>(length));
2617 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002618 external_string->set_resource(resource);
2619
2620 return result;
2621}
2622
2623
John Reck59135872010-11-02 12:39:01 -07002624MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002625 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002626 size_t length = resource->length();
2627 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002628 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002629 return Failure::OutOfMemoryException();
2630 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002631
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002632 // For small strings we check whether the resource contains only
Steve Block9fac8402011-05-12 15:51:54 +01002633 // ASCII characters. If yes, we use a different string map.
2634 static const size_t kAsciiCheckLengthLimit = 32;
2635 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2636 String::IsAscii(resource->data(), static_cast<int>(length));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002637 Map* map = is_ascii ?
Steve Block44f0eee2011-05-26 01:26:41 +01002638 external_string_with_ascii_data_map() : external_string_map();
John Reck59135872010-11-02 12:39:01 -07002639 Object* result;
2640 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2641 if (!maybe_result->ToObject(&result)) return maybe_result;
2642 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002643
2644 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002645 external_string->set_length(static_cast<int>(length));
2646 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002647 external_string->set_resource(resource);
2648
2649 return result;
2650}
2651
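// Minimal sketch of the kind of test String::IsAscii() performs on the
// two-byte resource above (hypothetical helper, not the actual V8
// implementation): every code unit must fit in 7-bit ASCII.
//
//   static inline bool AllCodeUnitsAreAscii(const uc16* data, size_t length) {
//     for (size_t i = 0; i < length; i++) {
//       if (data[i] > 0x7F) return false;
//     }
//     return true;
//   }
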
2652
John Reck59135872010-11-02 12:39:01 -07002653MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002654 if (code <= String::kMaxAsciiCharCode) {
Steve Block44f0eee2011-05-26 01:26:41 +01002655 Object* value = single_character_string_cache()->get(code);
2656 if (value != undefined_value()) return value;
Steve Blocka7e24c12009-10-30 11:49:00 +00002657
2658 char buffer[1];
2659 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002660 Object* result;
2661 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002662
John Reck59135872010-11-02 12:39:01 -07002663 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002664 single_character_string_cache()->set(code, result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002665 return result;
2666 }
2667
John Reck59135872010-11-02 12:39:01 -07002668 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002669 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
John Reck59135872010-11-02 12:39:01 -07002670 if (!maybe_result->ToObject(&result)) return maybe_result;
2671 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002672 String* answer = String::cast(result);
2673 answer->Set(0, code);
2674 return answer;
2675}
2676
2677
John Reck59135872010-11-02 12:39:01 -07002678MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002679 if (length < 0 || length > ByteArray::kMaxLength) {
2680 return Failure::OutOfMemoryException();
2681 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002682 if (pretenure == NOT_TENURED) {
2683 return AllocateByteArray(length);
2684 }
2685 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002686 Object* result;
2687 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2688 ? old_data_space_->AllocateRaw(size)
2689 : lo_space_->AllocateRaw(size);
2690 if (!maybe_result->ToObject(&result)) return maybe_result;
2691 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002692
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002693 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2694 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002695 return result;
2696}
2697
2698
John Reck59135872010-11-02 12:39:01 -07002699MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002700 if (length < 0 || length > ByteArray::kMaxLength) {
2701 return Failure::OutOfMemoryException();
2702 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002703 int size = ByteArray::SizeFor(length);
2704 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002705 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002706 Object* result;
2707 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2708 if (!maybe_result->ToObject(&result)) return maybe_result;
2709 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002710
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002711 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2712 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002713 return result;
2714}
2715
2716
2717void Heap::CreateFillerObjectAt(Address addr, int size) {
2718 if (size == 0) return;
2719 HeapObject* filler = HeapObject::FromAddress(addr);
2720 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002721 filler->set_map(one_pointer_filler_map());
2722 } else if (size == 2 * kPointerSize) {
2723 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002724 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002725 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002726 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2727 }
2728}
2729
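// Hypothetical usage sketch for CreateFillerObjectAt() above: when an object
// is shrunk in place, the freed tail must remain parseable by heap iteration,
// so it is overwritten with a filler of exactly the freed size. The variable
// names below are illustrative only:
//
//   heap->CreateFillerObjectAt(object->address() + new_size,
//                              old_size - new_size);
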
2730
John Reck59135872010-11-02 12:39:01 -07002731MaybeObject* Heap::AllocateExternalArray(int length,
2732 ExternalArrayType array_type,
2733 void* external_pointer,
2734 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002735 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002736 Object* result;
2737 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2738 space,
2739 OLD_DATA_SPACE);
2740 if (!maybe_result->ToObject(&result)) return maybe_result;
2741 }
Steve Block3ce2e202009-11-05 08:53:23 +00002742
2743 reinterpret_cast<ExternalArray*>(result)->set_map(
2744 MapForExternalArrayType(array_type));
2745 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2746 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2747 external_pointer);
2748
2749 return result;
2750}
2751
2752
John Reck59135872010-11-02 12:39:01 -07002753MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2754 Code::Flags flags,
Steve Block44f0eee2011-05-26 01:26:41 +01002755 Handle<Object> self_reference,
2756 bool immovable) {
Leon Clarkeac952652010-07-15 11:15:24 +01002757 // Allocate ByteArray before the Code object, so that we do not risk
2758  // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002759 Object* reloc_info;
2760 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2761 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2762 }
Leon Clarkeac952652010-07-15 11:15:24 +01002763
Steve Block44f0eee2011-05-26 01:26:41 +01002764 // Compute size.
Leon Clarkeac952652010-07-15 11:15:24 +01002765 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002766 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002767 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002768 MaybeObject* maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002769 // Large code objects and code objects which should stay at a fixed address
2770 // are allocated in large object space.
2771 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
John Reck59135872010-11-02 12:39:01 -07002772 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002773 } else {
John Reck59135872010-11-02 12:39:01 -07002774 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002775 }
2776
John Reck59135872010-11-02 12:39:01 -07002777 Object* result;
2778 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002779
2780 // Initialize the object
2781 HeapObject::cast(result)->set_map(code_map());
2782 Code* code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002783 ASSERT(!isolate_->code_range()->exists() ||
2784 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002785 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002786 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002787 code->set_flags(flags);
Ben Murdochb8e0da22011-05-16 14:20:40 +01002788 if (code->is_call_stub() || code->is_keyed_call_stub()) {
2789 code->set_check_type(RECEIVER_MAP_CHECK);
2790 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002791 code->set_deoptimization_data(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002792 // Allow self references to created code object by patching the handle to
2793 // point to the newly allocated Code object.
2794 if (!self_reference.is_null()) {
2795 *(self_reference.location()) = code;
2796 }
2797 // Migrate generated code.
2798 // The generated code can contain Object** values (typically from handles)
2799 // that are dereferenced during the copy to point directly to the actual heap
2800 // objects. These pointers can include references to the code object itself,
2801 // through the self_reference parameter.
2802 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002803
2804#ifdef DEBUG
2805 code->Verify();
2806#endif
2807 return code;
2808}
2809
2810
John Reck59135872010-11-02 12:39:01 -07002811MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002812 // Allocate an object the same size as the code object.
2813 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002814 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002815 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002816 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002817 } else {
John Reck59135872010-11-02 12:39:01 -07002818 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002819 }
2820
John Reck59135872010-11-02 12:39:01 -07002821 Object* result;
2822 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002823
2824 // Copy code object.
2825 Address old_addr = code->address();
2826 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002827 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002828 // Relocate the copy.
2829 Code* new_code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002830 ASSERT(!isolate_->code_range()->exists() ||
2831 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002832 new_code->Relocate(new_addr - old_addr);
2833 return new_code;
2834}
2835
2836
John Reck59135872010-11-02 12:39:01 -07002837MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002838 // Allocate ByteArray before the Code object, so that we do not risk
2839  // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002840 Object* reloc_info_array;
2841 { MaybeObject* maybe_reloc_info_array =
2842 AllocateByteArray(reloc_info.length(), TENURED);
2843 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2844 return maybe_reloc_info_array;
2845 }
2846 }
Leon Clarkeac952652010-07-15 11:15:24 +01002847
2848 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002849
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002850 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002851
2852 Address old_addr = code->address();
2853
2854 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002855 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002856
John Reck59135872010-11-02 12:39:01 -07002857 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002858 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002859 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002860 } else {
John Reck59135872010-11-02 12:39:01 -07002861 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002862 }
2863
John Reck59135872010-11-02 12:39:01 -07002864 Object* result;
2865 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002866
2867 // Copy code object.
2868 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2869
2870 // Copy header and instructions.
2871 memcpy(new_addr, old_addr, relocation_offset);
2872
Steve Block6ded16b2010-05-10 14:33:55 +01002873 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002874 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002875
Leon Clarkeac952652010-07-15 11:15:24 +01002876 // Copy patched rinfo.
2877 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002878
2879 // Relocate the copy.
Steve Block44f0eee2011-05-26 01:26:41 +01002880 ASSERT(!isolate_->code_range()->exists() ||
2881 isolate_->code_range()->contains(code->address()));
Steve Block6ded16b2010-05-10 14:33:55 +01002882 new_code->Relocate(new_addr - old_addr);
2883
2884#ifdef DEBUG
2885 code->Verify();
2886#endif
2887 return new_code;
2888}
2889
2890
John Reck59135872010-11-02 12:39:01 -07002891MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002892 ASSERT(gc_state_ == NOT_IN_GC);
2893 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002894 // If allocation failures are disallowed, we may allocate in a different
2895 // space when new space is full and the object is not a large object.
2896 AllocationSpace retry_space =
2897 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002898 Object* result;
2899 { MaybeObject* maybe_result =
2900 AllocateRaw(map->instance_size(), space, retry_space);
2901 if (!maybe_result->ToObject(&result)) return maybe_result;
2902 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002903 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002904#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01002905 isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
Steve Block3ce2e202009-11-05 08:53:23 +00002906#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002907 return result;
2908}
2909
2910
John Reck59135872010-11-02 12:39:01 -07002911MaybeObject* Heap::InitializeFunction(JSFunction* function,
2912 SharedFunctionInfo* shared,
2913 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002914 ASSERT(!prototype->IsMap());
2915 function->initialize_properties();
2916 function->initialize_elements();
2917 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002918 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002919 function->set_prototype_or_initial_map(prototype);
2920 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002921 function->set_literals(empty_fixed_array());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002922 function->set_next_function_link(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002923 return function;
2924}
2925
2926
John Reck59135872010-11-02 12:39:01 -07002927MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002928 // Allocate the prototype. Make sure to use the object function
2929 // from the function's context, since the function can be from a
2930 // different context.
2931 JSFunction* object_function =
2932 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002933 Object* prototype;
2934 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2935 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2936 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002937 // When creating the prototype for the function we must set its
2938 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002939 Object* result;
2940 { MaybeObject* maybe_result =
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002941 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
2942 constructor_symbol(), function, DONT_ENUM);
John Reck59135872010-11-02 12:39:01 -07002943 if (!maybe_result->ToObject(&result)) return maybe_result;
2944 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002945 return prototype;
2946}
2947
2948
John Reck59135872010-11-02 12:39:01 -07002949MaybeObject* Heap::AllocateFunction(Map* function_map,
2950 SharedFunctionInfo* shared,
2951 Object* prototype,
2952 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002953 AllocationSpace space =
2954 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002955 Object* result;
2956 { MaybeObject* maybe_result = Allocate(function_map, space);
2957 if (!maybe_result->ToObject(&result)) return maybe_result;
2958 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002959 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2960}
2961
2962
John Reck59135872010-11-02 12:39:01 -07002963MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002964 // To get fast allocation and map sharing for arguments objects we
2965 // allocate them based on an arguments boilerplate.
2966
Steve Block44f0eee2011-05-26 01:26:41 +01002967 JSObject* boilerplate;
2968 int arguments_object_size;
2969 bool strict_mode_callee = callee->IsJSFunction() &&
2970 JSFunction::cast(callee)->shared()->strict_mode();
2971 if (strict_mode_callee) {
2972 boilerplate =
2973 isolate()->context()->global_context()->
2974 strict_mode_arguments_boilerplate();
2975 arguments_object_size = kArgumentsObjectSizeStrict;
2976 } else {
2977 boilerplate =
2978 isolate()->context()->global_context()->arguments_boilerplate();
2979 arguments_object_size = kArgumentsObjectSize;
2980 }
2981
Steve Blocka7e24c12009-10-30 11:49:00 +00002982 // This calls Copy directly rather than using Heap::AllocateRaw so we
2983 // duplicate the check here.
2984 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2985
Leon Clarkee46be812010-01-19 14:06:41 +00002986 // Check that the size of the boilerplate matches our
2987 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2988 // on the size being a known constant.
Steve Block44f0eee2011-05-26 01:26:41 +01002989 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
Leon Clarkee46be812010-01-19 14:06:41 +00002990
2991 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002992 Object* result;
2993 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01002994 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
John Reck59135872010-11-02 12:39:01 -07002995 if (!maybe_result->ToObject(&result)) return maybe_result;
2996 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002997
2998 // Copy the content. The arguments boilerplate doesn't have any
2999 // fields that point to new space so it's safe to skip the write
3000 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003001 CopyBlock(HeapObject::cast(result)->address(),
3002 boilerplate->address(),
Steve Block44f0eee2011-05-26 01:26:41 +01003003 JSObject::kHeaderSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003004
Steve Block44f0eee2011-05-26 01:26:41 +01003005 // Set the length property.
3006 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
Steve Blocka7e24c12009-10-30 11:49:00 +00003007 Smi::FromInt(length),
3008 SKIP_WRITE_BARRIER);
Steve Block44f0eee2011-05-26 01:26:41 +01003009 // Set the callee property for non-strict mode arguments object only.
3010 if (!strict_mode_callee) {
3011 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3012 callee);
3013 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003014
3015 // Check the state of the object
3016 ASSERT(JSObject::cast(result)->HasFastProperties());
3017 ASSERT(JSObject::cast(result)->HasFastElements());
3018
3019 return result;
3020}
3021
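// Descriptive note on the two boilerplates above: the strict mode arguments
// object does not get a callee field written here, and its boilerplate is
// smaller (kArgumentsObjectSizeStrict); only the length slot is set
// unconditionally.
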
3022
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003023static bool HasDuplicates(DescriptorArray* descriptors) {
3024 int count = descriptors->number_of_descriptors();
3025 if (count > 1) {
3026 String* prev_key = descriptors->GetKey(0);
3027 for (int i = 1; i != count; i++) {
3028 String* current_key = descriptors->GetKey(i);
3029 if (prev_key == current_key) return true;
3030 prev_key = current_key;
3031 }
3032 }
3033 return false;
3034}
3035
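// HasDuplicates() above relies on the caller sorting the descriptors first
// (see the SortUnchecked() call in AllocateInitialMap() below): after
// sorting, a duplicate key must sit next to its twin, so one linear pass over
// adjacent pairs suffices. Pointer comparison of the keys is enough because
// the names are symbols, which are uniquified per string content.
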
3036
John Reck59135872010-11-02 12:39:01 -07003037MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003038 ASSERT(!fun->has_initial_map());
3039
3040 // First create a new map with the size and number of in-object properties
3041 // suggested by the function.
3042 int instance_size = fun->shared()->CalculateInstanceSize();
3043 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07003044 Object* map_obj;
Steve Block44f0eee2011-05-26 01:26:41 +01003045 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
John Reck59135872010-11-02 12:39:01 -07003046 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3047 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003048
3049 // Fetch or allocate prototype.
3050 Object* prototype;
3051 if (fun->has_instance_prototype()) {
3052 prototype = fun->instance_prototype();
3053 } else {
John Reck59135872010-11-02 12:39:01 -07003054 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3055 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3056 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003057 }
3058 Map* map = Map::cast(map_obj);
3059 map->set_inobject_properties(in_object_properties);
3060 map->set_unused_property_fields(in_object_properties);
3061 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01003062 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003063
Andrei Popescu402d9372010-02-26 13:31:12 +00003064 // If the function has only simple this property assignments add
3065 // field descriptors for these to the initial map as the object
3066 // cannot be constructed without having these properties. Guard by
3067 // the inline_new flag so we only change the map if we generate a
3068 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00003069 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00003070 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003071 int count = fun->shared()->this_property_assignments_count();
3072 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003073 // Inline constructor can only handle inobject properties.
3074 fun->shared()->ForbidInlineConstructor();
3075 } else {
John Reck59135872010-11-02 12:39:01 -07003076 Object* descriptors_obj;
3077 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3078 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3079 return maybe_descriptors_obj;
3080 }
3081 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003082 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3083 for (int i = 0; i < count; i++) {
3084 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3085 ASSERT(name->IsSymbol());
3086 FieldDescriptor field(name, i, NONE);
3087 field.SetEnumerationIndex(i);
3088 descriptors->Set(i, &field);
3089 }
3090 descriptors->SetNextEnumerationIndex(count);
3091 descriptors->SortUnchecked();
3092
3093 // The descriptors may contain duplicates because the compiler does not
3094 // guarantee the uniqueness of property names (it would have required
3095 // quadratic time). Once the descriptors are sorted we can check for
3096 // duplicates in linear time.
3097 if (HasDuplicates(descriptors)) {
3098 fun->shared()->ForbidInlineConstructor();
3099 } else {
3100 map->set_instance_descriptors(descriptors);
3101 map->set_pre_allocated_property_fields(count);
3102 map->set_unused_property_fields(in_object_properties - count);
3103 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003104 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003105 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003106
3107 fun->shared()->StartInobjectSlackTracking(map);
3108
Steve Blocka7e24c12009-10-30 11:49:00 +00003109 return map;
3110}
3111
3112
3113void Heap::InitializeJSObjectFromMap(JSObject* obj,
3114 FixedArray* properties,
3115 Map* map) {
3116 obj->set_properties(properties);
3117 obj->initialize_elements();
3118 // TODO(1240798): Initialize the object's body using valid initial values
3119 // according to the object's initial map. For example, if the map's
3120 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3121 // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
3122 // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
3123 // verification code has to cope with (temporarily) invalid objects. See
3124  // for example, JSArray::JSArrayVerify.
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003125 Object* filler;
3126 // We cannot always fill with one_pointer_filler_map because objects
3127 // created from API functions expect their internal fields to be initialized
3128 // with undefined_value.
3129 if (map->constructor()->IsJSFunction() &&
3130 JSFunction::cast(map->constructor())->shared()->
3131 IsInobjectSlackTrackingInProgress()) {
3132 // We might want to shrink the object later.
3133 ASSERT(obj->GetInternalFieldCount() == 0);
3134 filler = Heap::one_pointer_filler_map();
3135 } else {
3136 filler = Heap::undefined_value();
3137 }
3138 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00003139}
3140
3141
John Reck59135872010-11-02 12:39:01 -07003142MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003143 // JSFunctions should be allocated using AllocateFunction to be
3144 // properly initialized.
3145 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3146
Steve Block8defd9f2010-07-08 12:39:36 +01003147 // Both types of global objects should be allocated using
3148 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003149 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3150 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3151
3152 // Allocate the backing storage for the properties.
3153 int prop_size =
3154 map->pre_allocated_property_fields() +
3155 map->unused_property_fields() -
3156 map->inobject_properties();
3157 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003158 Object* properties;
3159 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3160 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3161 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003162
3163 // Allocate the JSObject.
3164 AllocationSpace space =
3165 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3166 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003167 Object* obj;
3168 { MaybeObject* maybe_obj = Allocate(map, space);
3169 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3170 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003171
3172 // Initialize the JSObject.
3173 InitializeJSObjectFromMap(JSObject::cast(obj),
3174 FixedArray::cast(properties),
3175 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003176 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003177 return obj;
3178}
3179
3180
John Reck59135872010-11-02 12:39:01 -07003181MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3182 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003183 // Allocate the initial map if absent.
3184 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003185 Object* initial_map;
3186 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3187 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3188 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003189 constructor->set_initial_map(Map::cast(initial_map));
3190 Map::cast(initial_map)->set_constructor(constructor);
3191 }
3192  // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003193 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003194 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003195#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003196 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003197 Object* non_failure;
3198 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3199#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003200 return result;
3201}
3202
3203
John Reck59135872010-11-02 12:39:01 -07003204MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003205 ASSERT(constructor->has_initial_map());
3206 Map* map = constructor->initial_map();
3207
3208 // Make sure no field properties are described in the initial map.
3209 // This guarantees us that normalizing the properties does not
3210 // require us to change property values to JSGlobalPropertyCells.
3211 ASSERT(map->NextFreePropertyIndex() == 0);
3212
3213 // Make sure we don't have a ton of pre-allocated slots in the
3214 // global objects. They will be unused once we normalize the object.
3215 ASSERT(map->unused_property_fields() == 0);
3216 ASSERT(map->inobject_properties() == 0);
3217
3218 // Initial size of the backing store to avoid resize of the storage during
3219  // bootstrapping. The size differs between the JS global object and the
3220 // builtins object.
3221 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3222
3223 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003224 Object* obj;
3225 { MaybeObject* maybe_obj =
3226 StringDictionary::Allocate(
3227 map->NumberOfDescribedProperties() * 2 + initial_size);
3228 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3229 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003230 StringDictionary* dictionary = StringDictionary::cast(obj);
3231
3232 // The global object might be created from an object template with accessors.
3233 // Fill these accessors into the dictionary.
3234 DescriptorArray* descs = map->instance_descriptors();
3235 for (int i = 0; i < descs->number_of_descriptors(); i++) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003236 PropertyDetails details(descs->GetDetails(i));
Steve Blocka7e24c12009-10-30 11:49:00 +00003237 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3238 PropertyDetails d =
3239 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3240 Object* value = descs->GetCallbacksObject(i);
Steve Block44f0eee2011-05-26 01:26:41 +01003241 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
John Reck59135872010-11-02 12:39:01 -07003242 if (!maybe_value->ToObject(&value)) return maybe_value;
3243 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003244
John Reck59135872010-11-02 12:39:01 -07003245 Object* result;
3246 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3247 if (!maybe_result->ToObject(&result)) return maybe_result;
3248 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003249 dictionary = StringDictionary::cast(result);
3250 }
3251
3252 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003253 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3254 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3255 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003256 JSObject* global = JSObject::cast(obj);
3257 InitializeJSObjectFromMap(global, dictionary, map);
3258
3259 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003260 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3261 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3262 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003263 Map* new_map = Map::cast(obj);
3264
3265  // Set up the global object as a normalized object.
3266 global->set_map(new_map);
Steve Block44f0eee2011-05-26 01:26:41 +01003267 global->map()->set_instance_descriptors(empty_descriptor_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00003268 global->set_properties(dictionary);
3269
3270 // Make sure result is a global object with properties in dictionary.
3271 ASSERT(global->IsGlobalObject());
3272 ASSERT(!global->HasFastProperties());
3273 return global;
3274}
3275
3276
John Reck59135872010-11-02 12:39:01 -07003277MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003278 // Never used to copy functions. If functions need to be copied we
3279 // have to be careful to clear the literals array.
3280 ASSERT(!source->IsJSFunction());
3281
3282 // Make the clone.
3283 Map* map = source->map();
3284 int object_size = map->instance_size();
3285 Object* clone;
3286
3287 // If we're forced to always allocate, we use the general allocation
3288 // functions which may leave us with an object in old space.
3289 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003290 { MaybeObject* maybe_clone =
3291 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3292 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3293 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003294 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003295 CopyBlock(clone_address,
3296 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003297 object_size);
3298 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003299 RecordWrites(clone_address,
3300 JSObject::kHeaderSize,
3301 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003302 } else {
John Reck59135872010-11-02 12:39:01 -07003303 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3304 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3305 }
Steve Block44f0eee2011-05-26 01:26:41 +01003306 ASSERT(InNewSpace(clone));
Steve Blocka7e24c12009-10-30 11:49:00 +00003307 // Since we know the clone is allocated in new space, we can copy
3308 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003309 CopyBlock(HeapObject::cast(clone)->address(),
3310 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003311 object_size);
3312 }
3313
3314 FixedArray* elements = FixedArray::cast(source->elements());
3315 FixedArray* properties = FixedArray::cast(source->properties());
3316 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003317 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003318 Object* elem;
3319 { MaybeObject* maybe_elem =
3320 (elements->map() == fixed_cow_array_map()) ?
3321 elements : CopyFixedArray(elements);
3322 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3323 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003324 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3325 }
3326 // Update properties if necessary.
3327 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003328 Object* prop;
3329 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3330 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3331 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003332 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3333 }
3334 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003335#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01003336 isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
Steve Block3ce2e202009-11-05 08:53:23 +00003337#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003338 return clone;
3339}
3340
3341
John Reck59135872010-11-02 12:39:01 -07003342MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3343 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003344 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003345 Map* map = constructor->initial_map();
3346
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003347 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003348 // objects allocated using the constructor.
3349 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003350 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003351
3352 // Allocate the backing storage for the properties.
3353 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003354 Object* properties;
3355 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3356 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3357 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003358
3359 // Reset the map for the object.
3360 object->set_map(constructor->initial_map());
3361
3362 // Reinitialize the object from the constructor map.
3363 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3364 return object;
3365}
3366
3367
John Reck59135872010-11-02 12:39:01 -07003368MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3369 PretenureFlag pretenure) {
3370 Object* result;
3371 { MaybeObject* maybe_result =
3372 AllocateRawAsciiString(string.length(), pretenure);
3373 if (!maybe_result->ToObject(&result)) return maybe_result;
3374 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003375
3376 // Copy the characters into the new object.
3377 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3378 for (int i = 0; i < string.length(); i++) {
3379 string_result->SeqAsciiStringSet(i, string[i]);
3380 }
3381 return result;
3382}
3383
3384
Steve Block9fac8402011-05-12 15:51:54 +01003385MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3386 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003387 // V8 only supports characters in the Basic Multilingual Plane.
3388 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003389  // Count the number of characters in the UTF-8 string.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003391 Access<UnicodeCache::Utf8Decoder>
3392 decoder(isolate_->unicode_cache()->utf8_decoder());
Steve Blocka7e24c12009-10-30 11:49:00 +00003393 decoder->Reset(string.start(), string.length());
3394 int chars = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003395 while (decoder->has_more()) {
Steve Block9fac8402011-05-12 15:51:54 +01003396 decoder->GetNext();
Steve Blocka7e24c12009-10-30 11:49:00 +00003397 chars++;
3398 }
3399
John Reck59135872010-11-02 12:39:01 -07003400 Object* result;
3401 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3402 if (!maybe_result->ToObject(&result)) return maybe_result;
3403 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003404
3405 // Convert and copy the characters into the new object.
3406 String* string_result = String::cast(result);
3407 decoder->Reset(string.start(), string.length());
3408 for (int i = 0; i < chars; i++) {
3409 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003410 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003411 string_result->Set(i, r);
3412 }
3413 return result;
3414}
3415
3416
John Reck59135872010-11-02 12:39:01 -07003417MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3418 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003419 // Check if the string is an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003420 MaybeObject* maybe_result;
Steve Block9fac8402011-05-12 15:51:54 +01003421 if (String::IsAscii(string.start(), string.length())) {
John Reck59135872010-11-02 12:39:01 -07003422 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003423 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003424 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003425 }
John Reck59135872010-11-02 12:39:01 -07003426 Object* result;
3427 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003428
3429 // Copy the characters into the new object, which may be either ASCII or
3430 // UTF-16.
3431 String* string_result = String::cast(result);
3432 for (int i = 0; i < string.length(); i++) {
3433 string_result->Set(i, string[i]);
3434 }
3435 return result;
3436}
3437
3438
3439Map* Heap::SymbolMapForString(String* string) {
3440 // If the string is in new space it cannot be used as a symbol.
3441 if (InNewSpace(string)) return NULL;
3442
3443 // Find the corresponding symbol map for strings.
3444 Map* map = string->map();
Steve Block44f0eee2011-05-26 01:26:41 +01003445 if (map == ascii_string_map()) {
3446 return ascii_symbol_map();
3447 }
3448 if (map == string_map()) {
3449 return symbol_map();
3450 }
3451 if (map == cons_string_map()) {
3452 return cons_symbol_map();
3453 }
3454 if (map == cons_ascii_string_map()) {
3455 return cons_ascii_symbol_map();
3456 }
3457 if (map == external_string_map()) {
3458 return external_symbol_map();
3459 }
3460 if (map == external_ascii_string_map()) {
3461 return external_ascii_symbol_map();
3462 }
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003463 if (map == external_string_with_ascii_data_map()) {
3464 return external_symbol_with_ascii_data_map();
3465 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003466
3467 // No match found.
3468 return NULL;
3469}
3470
3471
John Reck59135872010-11-02 12:39:01 -07003472MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3473 int chars,
3474 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003475 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003476 // Ensure the chars matches the number of characters in the buffer.
3477 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3478 // Determine whether the string is ascii.
3479 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003480 while (buffer->has_more()) {
3481 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3482 is_ascii = false;
3483 break;
3484 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003485 }
3486 buffer->Rewind();
3487
3488 // Compute map and object size.
3489 int size;
3490 Map* map;
3491
3492 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003493 if (chars > SeqAsciiString::kMaxLength) {
3494 return Failure::OutOfMemoryException();
3495 }
Steve Blockd0582a62009-12-15 09:54:21 +00003496 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003497 size = SeqAsciiString::SizeFor(chars);
3498 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003499 if (chars > SeqTwoByteString::kMaxLength) {
3500 return Failure::OutOfMemoryException();
3501 }
Steve Blockd0582a62009-12-15 09:54:21 +00003502 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003503 size = SeqTwoByteString::SizeFor(chars);
3504 }
3505
3506 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003507 Object* result;
3508 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3509 ? lo_space_->AllocateRaw(size)
3510 : old_data_space_->AllocateRaw(size);
3511 if (!maybe_result->ToObject(&result)) return maybe_result;
3512 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003513
3514 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003515 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003516 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003517 answer->set_length(chars);
3518 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003519
3520 ASSERT_EQ(size, answer->Size());
3521
3522 // Fill in the characters.
3523 for (int i = 0; i < chars; i++) {
3524 answer->Set(i, buffer->GetNext());
3525 }
3526 return answer;
3527}
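// Note on the allocation above: internal symbols are never placed in new
// space. The raw storage comes either from old data space or, when the
// symbol does not fit on a regular page, from large object space:
//
//   size <= MaxObjectSizeInPagedSpace()  ->  old_data_space_
//   size >  MaxObjectSizeInPagedSpace()  ->  lo_space_
//
// This is consistent with SymbolMapForString() above, which refuses to
// symbolize strings that still live in new space.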
3528
3529
John Reck59135872010-11-02 12:39:01 -07003530MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003531 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3532 return Failure::OutOfMemoryException();
3533 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003534
3535 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003536 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003537
Leon Clarkee46be812010-01-19 14:06:41 +00003538 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3539 AllocationSpace retry_space = OLD_DATA_SPACE;
3540
Steve Blocka7e24c12009-10-30 11:49:00 +00003541 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003542 if (size > kMaxObjectSizeInNewSpace) {
3543      // Allocate in large object space; the retry space will be ignored.
3544 space = LO_SPACE;
3545 } else if (size > MaxObjectSizeInPagedSpace()) {
3546 // Allocate in new space, retry in large object space.
3547 retry_space = LO_SPACE;
3548 }
3549 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3550 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003551 }
John Reck59135872010-11-02 12:39:01 -07003552 Object* result;
3553 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3554 if (!maybe_result->ToObject(&result)) return maybe_result;
3555 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003556
Steve Blocka7e24c12009-10-30 11:49:00 +00003557 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003558 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003559 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003560 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003561 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3562 return result;
3563}
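// Space-selection policy used by AllocateRawAsciiString above and by the
// two-byte variant below, summarized as a sketch (NOT_TENURED denotes the
// non-TENURED pretenure flag):
//
//   NOT_TENURED, size <= kMaxObjectSizeInNewSpace
//       -> NEW_SPACE, retrying in OLD_DATA_SPACE (or in LO_SPACE when the
//          object is too large for a regular page)
//   NOT_TENURED, size >  kMaxObjectSizeInNewSpace
//       -> LO_SPACE directly; the retry space is ignored
//   TENURED,     size <= MaxObjectSizeInPagedSpace()
//       -> OLD_DATA_SPACE
//   TENURED,     size >  MaxObjectSizeInPagedSpace()
//       -> LO_SPACE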
3564
3565
John Reck59135872010-11-02 12:39:01 -07003566MaybeObject* Heap::AllocateRawTwoByteString(int length,
3567 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003568 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3569 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003570 }
Leon Clarkee46be812010-01-19 14:06:41 +00003571 int size = SeqTwoByteString::SizeFor(length);
3572 ASSERT(size <= SeqTwoByteString::kMaxSize);
3573 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3574 AllocationSpace retry_space = OLD_DATA_SPACE;
3575
3576 if (space == NEW_SPACE) {
3577 if (size > kMaxObjectSizeInNewSpace) {
3578      // Allocate in large object space; the retry space will be ignored.
3579 space = LO_SPACE;
3580 } else if (size > MaxObjectSizeInPagedSpace()) {
3581 // Allocate in new space, retry in large object space.
3582 retry_space = LO_SPACE;
3583 }
3584 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3585 space = LO_SPACE;
3586 }
John Reck59135872010-11-02 12:39:01 -07003587 Object* result;
3588 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3589 if (!maybe_result->ToObject(&result)) return maybe_result;
3590 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003591
Steve Blocka7e24c12009-10-30 11:49:00 +00003592 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003593 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003594 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003595 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003596 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3597 return result;
3598}
3599
3600
John Reck59135872010-11-02 12:39:01 -07003601MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003602 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003603 Object* result;
3604 { MaybeObject* maybe_result =
3605 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3606 if (!maybe_result->ToObject(&result)) return maybe_result;
3607 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003608 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003609 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3610 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003611 return result;
3612}
3613
3614
John Reck59135872010-11-02 12:39:01 -07003615MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003616 if (length < 0 || length > FixedArray::kMaxLength) {
3617 return Failure::OutOfMemoryException();
3618 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003619 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003620 // Use the general function if we're forced to always allocate.
3621 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3622 // Allocate the raw data for a fixed array.
3623 int size = FixedArray::SizeFor(length);
3624 return size <= kMaxObjectSizeInNewSpace
3625 ? new_space_.AllocateRaw(size)
3626 : lo_space_->AllocateRawFixedArray(size);
3627}
3628
3629
John Reck59135872010-11-02 12:39:01 -07003630MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003631 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003632 Object* obj;
3633 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3634 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3635 }
Steve Block44f0eee2011-05-26 01:26:41 +01003636 if (InNewSpace(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003637 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003638 dst->set_map(map);
3639 CopyBlock(dst->address() + kPointerSize,
3640 src->address() + kPointerSize,
3641 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003642 return obj;
3643 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003644 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003645 FixedArray* result = FixedArray::cast(obj);
3646 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003647
Steve Blocka7e24c12009-10-30 11:49:00 +00003648 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003649 AssertNoAllocation no_gc;
3650 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003651 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3652 return result;
3653}
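// A note on the two copy paths above: when the freshly allocated destination
// is still in new space, the payload is copied with a raw CopyBlock and no
// write barrier, since stores into a new-space object never require
// remembered-set updates. Only the tenured path uses the element-wise set()
// loop with an explicit WriteBarrierMode. A hypothetical call site ('heap',
// 'src' and 'target_map' are made-up names for the example):
//
//   MaybeObject* maybe_copy = heap->CopyFixedArrayWithMap(src, target_map);
//   Object* copy;
//   if (!maybe_copy->ToObject(&copy)) return maybe_copy;  // Propagate failure.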
3654
3655
John Reck59135872010-11-02 12:39:01 -07003656MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003657 ASSERT(length >= 0);
3658 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003659 Object* result;
3660 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3661 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003662 }
John Reck59135872010-11-02 12:39:01 -07003663 // Initialize header.
3664 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3665 array->set_map(fixed_array_map());
3666 array->set_length(length);
3667 // Initialize body.
Steve Block44f0eee2011-05-26 01:26:41 +01003668 ASSERT(!InNewSpace(undefined_value()));
John Reck59135872010-11-02 12:39:01 -07003669 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003670 return result;
3671}
3672
3673
John Reck59135872010-11-02 12:39:01 -07003674MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003675 if (length < 0 || length > FixedArray::kMaxLength) {
3676 return Failure::OutOfMemoryException();
3677 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003678
Leon Clarkee46be812010-01-19 14:06:41 +00003679 AllocationSpace space =
3680 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003681 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003682 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3683 // Too big for new space.
3684 space = LO_SPACE;
3685 } else if (space == OLD_POINTER_SPACE &&
3686 size > MaxObjectSizeInPagedSpace()) {
3687 // Too big for old pointer space.
3688 space = LO_SPACE;
3689 }
3690
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003691 AllocationSpace retry_space =
3692 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3693
3694 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003695}
3696
3697
John Reck59135872010-11-02 12:39:01 -07003698MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
Steve Block44f0eee2011-05-26 01:26:41 +01003699 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07003700 int length,
3701 PretenureFlag pretenure,
3702 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003703 ASSERT(length >= 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003704 ASSERT(heap->empty_fixed_array()->IsFixedArray());
3705 if (length == 0) return heap->empty_fixed_array();
Steve Block6ded16b2010-05-10 14:33:55 +01003706
Steve Block44f0eee2011-05-26 01:26:41 +01003707 ASSERT(!heap->InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003708 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003709 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003710 if (!maybe_result->ToObject(&result)) return maybe_result;
3711 }
Steve Block6ded16b2010-05-10 14:33:55 +01003712
Steve Block44f0eee2011-05-26 01:26:41 +01003713 HeapObject::cast(result)->set_map(heap->fixed_array_map());
Steve Block6ded16b2010-05-10 14:33:55 +01003714 FixedArray* array = FixedArray::cast(result);
3715 array->set_length(length);
3716 MemsetPointer(array->data_start(), filler, length);
3717 return array;
3718}
3719
3720
John Reck59135872010-11-02 12:39:01 -07003721MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003722 return AllocateFixedArrayWithFiller(this,
3723 length,
3724 pretenure,
3725 undefined_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003726}
3727
3728
John Reck59135872010-11-02 12:39:01 -07003729MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3730 PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003731 return AllocateFixedArrayWithFiller(this,
3732 length,
3733 pretenure,
3734 the_hole_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003735}
3736
3737
John Reck59135872010-11-02 12:39:01 -07003738MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003739 if (length == 0) return empty_fixed_array();
3740
John Reck59135872010-11-02 12:39:01 -07003741 Object* obj;
3742 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3743 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3744 }
Steve Block6ded16b2010-05-10 14:33:55 +01003745
3746 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3747 FixedArray::cast(obj)->set_length(length);
3748 return obj;
3749}
3750
3751
John Reck59135872010-11-02 12:39:01 -07003752MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3753 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003754 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003755 if (!maybe_result->ToObject(&result)) return maybe_result;
3756 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003757 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003758 ASSERT(result->IsHashTable());
3759 return result;
3760}
3761
3762
John Reck59135872010-11-02 12:39:01 -07003763MaybeObject* Heap::AllocateGlobalContext() {
3764 Object* result;
3765 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01003766 AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003767 if (!maybe_result->ToObject(&result)) return maybe_result;
3768 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003769 Context* context = reinterpret_cast<Context*>(result);
3770 context->set_map(global_context_map());
3771 ASSERT(context->IsGlobalContext());
3772 ASSERT(result->IsContext());
3773 return result;
3774}
3775
3776
John Reck59135872010-11-02 12:39:01 -07003777MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003778 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003779 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003780 { MaybeObject* maybe_result = AllocateFixedArray(length);
John Reck59135872010-11-02 12:39:01 -07003781 if (!maybe_result->ToObject(&result)) return maybe_result;
3782 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003783 Context* context = reinterpret_cast<Context*>(result);
3784 context->set_map(context_map());
3785 context->set_closure(function);
3786 context->set_fcontext(context);
3787 context->set_previous(NULL);
3788 context->set_extension(NULL);
3789 context->set_global(function->context()->global());
3790 ASSERT(!context->IsGlobalContext());
3791 ASSERT(context->is_function_context());
3792 ASSERT(result->IsContext());
3793 return result;
3794}
3795
3796
John Reck59135872010-11-02 12:39:01 -07003797MaybeObject* Heap::AllocateWithContext(Context* previous,
3798 JSObject* extension,
3799 bool is_catch_context) {
3800 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003801 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003802 if (!maybe_result->ToObject(&result)) return maybe_result;
3803 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003804 Context* context = reinterpret_cast<Context*>(result);
Steve Block44f0eee2011-05-26 01:26:41 +01003805 context->set_map(is_catch_context ? catch_context_map() :
3806 context_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003807 context->set_closure(previous->closure());
3808 context->set_fcontext(previous->fcontext());
3809 context->set_previous(previous);
3810 context->set_extension(extension);
3811 context->set_global(previous->global());
3812 ASSERT(!context->IsGlobalContext());
3813 ASSERT(!context->is_function_context());
3814 ASSERT(result->IsContext());
3815 return result;
3816}
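// Summary of the two context allocators above (no new behavior, just the
// chain shape): a function context from AllocateFunctionContext is the head
// of its chain, so fcontext points at the context itself and previous is
// NULL, whereas a context from AllocateWithContext links back to the
// surrounding context and inherits its closure, fcontext and global. The
// only difference between a catch context and a plain 'with' context is the
// map that gets installed.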
3817
3818
John Reck59135872010-11-02 12:39:01 -07003819MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003820 Map* map;
3821 switch (type) {
Steve Block44f0eee2011-05-26 01:26:41 +01003822#define MAKE_CASE(NAME, Name, name) \
3823 case NAME##_TYPE: map = name##_map(); break;
Steve Blocka7e24c12009-10-30 11:49:00 +00003824STRUCT_LIST(MAKE_CASE)
3825#undef MAKE_CASE
3826 default:
3827 UNREACHABLE();
3828 return Failure::InternalError();
3829 }
3830 int size = map->instance_size();
3831 AllocationSpace space =
3832 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003833 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003834 { MaybeObject* maybe_result = Allocate(map, space);
John Reck59135872010-11-02 12:39:01 -07003835 if (!maybe_result->ToObject(&result)) return maybe_result;
3836 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003837 Struct::cast(result)->InitializeBody(size);
3838 return result;
3839}
3840
3841
3842bool Heap::IdleNotification() {
3843 static const int kIdlesBeforeScavenge = 4;
3844 static const int kIdlesBeforeMarkSweep = 7;
3845 static const int kIdlesBeforeMarkCompact = 8;
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003846 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
Ben Murdoche0cee9b2011-05-25 10:26:03 +01003847 static const unsigned int kGCsBetweenCleanup = 4;
Steve Block44f0eee2011-05-26 01:26:41 +01003848
3849 if (!last_idle_notification_gc_count_init_) {
3850 last_idle_notification_gc_count_ = gc_count_;
3851 last_idle_notification_gc_count_init_ = true;
3852 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003853
Steve Block6ded16b2010-05-10 14:33:55 +01003854 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003855 bool finished = false;
3856
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003857 // Reset the number of idle notifications received when a number of
3858 // GCs have taken place. This allows another round of cleanup based
3859 // on idle notifications if enough work has been carried out to
3860 // provoke a number of garbage collections.
Steve Block44f0eee2011-05-26 01:26:41 +01003861 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
3862 number_idle_notifications_ =
3863 Min(number_idle_notifications_ + 1, kMaxIdleCount);
Steve Blocka7e24c12009-10-30 11:49:00 +00003864 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003865 number_idle_notifications_ = 0;
3866 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003867 }
3868
Steve Block44f0eee2011-05-26 01:26:41 +01003869 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003870 if (contexts_disposed_ > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01003871 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003872 CollectAllGarbage(false);
3873 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003874 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003875 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003876 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003877 last_idle_notification_gc_count_ = gc_count_;
3878 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003879 // Before doing the mark-sweep collections we clear the
3880 // compilation cache to avoid hanging on to source code and
3881 // generated code for cached functions.
Steve Block44f0eee2011-05-26 01:26:41 +01003882 isolate_->compilation_cache()->Clear();
Steve Blockd0582a62009-12-15 09:54:21 +00003883
Steve Blocka7e24c12009-10-30 11:49:00 +00003884 CollectAllGarbage(false);
3885 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003886 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003887
Steve Block44f0eee2011-05-26 01:26:41 +01003888 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003889 CollectAllGarbage(true);
3890 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003891 last_idle_notification_gc_count_ = gc_count_;
3892 number_idle_notifications_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003893 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003894 } else if (contexts_disposed_ > 0) {
3895 if (FLAG_expose_gc) {
3896 contexts_disposed_ = 0;
3897 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003898 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003899 CollectAllGarbage(false);
Steve Block44f0eee2011-05-26 01:26:41 +01003900 last_idle_notification_gc_count_ = gc_count_;
Steve Block6ded16b2010-05-10 14:33:55 +01003901 }
3902    // If this is the first idle notification, we reset the
3903    // notification count to avoid letting idle notifications for
3904    // context disposal garbage collections start an overly
3905    // aggressive idle GC cycle.
Steve Block44f0eee2011-05-26 01:26:41 +01003906 if (number_idle_notifications_ <= 1) {
3907 number_idle_notifications_ = 0;
Steve Block6ded16b2010-05-10 14:33:55 +01003908 uncommit = false;
3909 }
Steve Block44f0eee2011-05-26 01:26:41 +01003910 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003911 // If we have received more than kIdlesBeforeMarkCompact idle
3912 // notifications we do not perform any cleanup because we don't
3913 // expect to gain much by doing so.
3914 finished = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003915 }
3916
Steve Block6ded16b2010-05-10 14:33:55 +01003917 // Make sure that we have no pending context disposals and
3918  // conditionally uncommit from-space.
3919 ASSERT(contexts_disposed_ == 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003920 if (uncommit) UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003921 return finished;
3922}
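// Idle-notification schedule implemented above, shown as a sketch. The
// counts are consecutive notifications since the counter was last reset:
//
//   4th  (kIdlesBeforeScavenge)    -> scavenge new space (or a full GC if
//                                     contexts were disposed), shrink new space
//   7th  (kIdlesBeforeMarkSweep)   -> clear the compilation cache, run a full
//                                     GC, shrink new space
//   8th  (kIdlesBeforeMarkCompact) -> run a full compacting GC, shrink new
//                                     space, report finished
//   more than 8                    -> no further cleanup, report finished
//
// Independent of this schedule, pending disposed contexts trigger a full GC
// on the next notification unless FLAG_expose_gc is set. The counter is
// reset whenever at least kGCsBetweenCleanup (4) garbage collections have
// happened since last_idle_notification_gc_count_ was recorded, so ordinary
// allocation activity restarts the cycle.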
3923
3924
3925#ifdef DEBUG
3926
3927void Heap::Print() {
3928 if (!HasBeenSetup()) return;
Steve Block44f0eee2011-05-26 01:26:41 +01003929 isolate()->PrintStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00003930 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003931 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3932 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003933}
3934
3935
3936void Heap::ReportCodeStatistics(const char* title) {
3937 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3938 PagedSpace::ResetCodeStatistics();
3939 // We do not look for code in new space, map space, or old space. If code
3940 // somehow ends up in those spaces, we would miss it here.
3941 code_space_->CollectCodeStatistics();
3942 lo_space_->CollectCodeStatistics();
3943 PagedSpace::ReportCodeStatistics();
3944}
3945
3946
3947// This function expects that NewSpace's allocated objects histogram is
3948// populated (via a call to CollectStatistics or else as a side effect of a
3949// just-completed scavenge collection).
3950void Heap::ReportHeapStatistics(const char* title) {
3951 USE(title);
3952 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3953 title, gc_count_);
3954 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003955 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3956 old_gen_promotion_limit_);
3957 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3958 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003959
3960 PrintF("\n");
3961 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
Steve Block44f0eee2011-05-26 01:26:41 +01003962 isolate_->global_handles()->PrintStats();
Steve Blocka7e24c12009-10-30 11:49:00 +00003963 PrintF("\n");
3964
3965 PrintF("Heap statistics : ");
Steve Block44f0eee2011-05-26 01:26:41 +01003966 isolate_->memory_allocator()->ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00003967 PrintF("To space : ");
3968 new_space_.ReportStatistics();
3969 PrintF("Old pointer space : ");
3970 old_pointer_space_->ReportStatistics();
3971 PrintF("Old data space : ");
3972 old_data_space_->ReportStatistics();
3973 PrintF("Code space : ");
3974 code_space_->ReportStatistics();
3975 PrintF("Map space : ");
3976 map_space_->ReportStatistics();
3977 PrintF("Cell space : ");
3978 cell_space_->ReportStatistics();
3979 PrintF("Large object space : ");
3980 lo_space_->ReportStatistics();
3981 PrintF(">>>>>> ========================================= >>>>>>\n");
3982}
3983
3984#endif // DEBUG
3985
3986bool Heap::Contains(HeapObject* value) {
3987 return Contains(value->address());
3988}
3989
3990
3991bool Heap::Contains(Address addr) {
3992 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3993 return HasBeenSetup() &&
3994 (new_space_.ToSpaceContains(addr) ||
3995 old_pointer_space_->Contains(addr) ||
3996 old_data_space_->Contains(addr) ||
3997 code_space_->Contains(addr) ||
3998 map_space_->Contains(addr) ||
3999 cell_space_->Contains(addr) ||
4000 lo_space_->SlowContains(addr));
4001}
4002
4003
4004bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4005 return InSpace(value->address(), space);
4006}
4007
4008
4009bool Heap::InSpace(Address addr, AllocationSpace space) {
4010 if (OS::IsOutsideAllocatedSpace(addr)) return false;
4011 if (!HasBeenSetup()) return false;
4012
4013 switch (space) {
4014 case NEW_SPACE:
4015 return new_space_.ToSpaceContains(addr);
4016 case OLD_POINTER_SPACE:
4017 return old_pointer_space_->Contains(addr);
4018 case OLD_DATA_SPACE:
4019 return old_data_space_->Contains(addr);
4020 case CODE_SPACE:
4021 return code_space_->Contains(addr);
4022 case MAP_SPACE:
4023 return map_space_->Contains(addr);
4024 case CELL_SPACE:
4025 return cell_space_->Contains(addr);
4026 case LO_SPACE:
4027 return lo_space_->SlowContains(addr);
4028 }
4029
4030 return false;
4031}
4032
4033
4034#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004035static void DummyScavengePointer(HeapObject** p) {
4036}
4037
4038
4039static void VerifyPointersUnderWatermark(
4040 PagedSpace* space,
4041 DirtyRegionCallback visit_dirty_region) {
4042 PageIterator it(space, PageIterator::PAGES_IN_USE);
4043
4044 while (it.has_next()) {
4045 Page* page = it.next();
4046 Address start = page->ObjectAreaStart();
4047 Address end = page->AllocationWatermark();
4048
Steve Block44f0eee2011-05-26 01:26:41 +01004049 HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004050 start,
4051 end,
4052 visit_dirty_region,
4053 &DummyScavengePointer);
4054 }
4055}
4056
4057
4058static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
4059 LargeObjectIterator it(space);
4060 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
4061 if (object->IsFixedArray()) {
4062 Address slot_address = object->address();
4063 Address end = object->address() + object->Size();
4064
4065 while (slot_address < end) {
4066 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
4067        // When we are not in a GC, the Heap::InNewSpace() predicate
4068        // itself checks that pointers which satisfy the predicate point
4069        // into the active semispace.
Steve Block44f0eee2011-05-26 01:26:41 +01004070 HEAP->InNewSpace(*slot);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004071 slot_address += kPointerSize;
4072 }
4073 }
4074 }
4075}
4076
4077
Steve Blocka7e24c12009-10-30 11:49:00 +00004078void Heap::Verify() {
4079 ASSERT(HasBeenSetup());
4080
4081 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00004082 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00004083
4084 new_space_.Verify();
4085
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004086 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
4087 old_pointer_space_->Verify(&dirty_regions_visitor);
4088 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004089
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004090 VerifyPointersUnderWatermark(old_pointer_space_,
4091 &IteratePointersInDirtyRegion);
4092 VerifyPointersUnderWatermark(map_space_,
4093 &IteratePointersInDirtyMapsRegion);
4094 VerifyPointersUnderWatermark(lo_space_);
4095
4096 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4097 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4098
4099 VerifyPointersVisitor no_dirty_regions_visitor;
4100 old_data_space_->Verify(&no_dirty_regions_visitor);
4101 code_space_->Verify(&no_dirty_regions_visitor);
4102 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004103
4104 lo_space_->Verify();
4105}
4106#endif // DEBUG
4107
4108
John Reck59135872010-11-02 12:39:01 -07004109MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004110 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004111 Object* new_table;
4112 { MaybeObject* maybe_new_table =
4113 symbol_table()->LookupSymbol(string, &symbol);
4114 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4115 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004116 // Can't use set_symbol_table because SymbolTable::cast knows that
4117 // SymbolTable is a singleton and checks for identity.
4118 roots_[kSymbolTableRootIndex] = new_table;
4119 ASSERT(symbol != NULL);
4120 return symbol;
4121}
4122
4123
Steve Block9fac8402011-05-12 15:51:54 +01004124MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4125 Object* symbol = NULL;
4126 Object* new_table;
4127 { MaybeObject* maybe_new_table =
4128 symbol_table()->LookupAsciiSymbol(string, &symbol);
4129 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4130 }
4131 // Can't use set_symbol_table because SymbolTable::cast knows that
4132 // SymbolTable is a singleton and checks for identity.
4133 roots_[kSymbolTableRootIndex] = new_table;
4134 ASSERT(symbol != NULL);
4135 return symbol;
4136}
4137
4138
4139MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4140 Object* symbol = NULL;
4141 Object* new_table;
4142 { MaybeObject* maybe_new_table =
4143 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4144 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4145 }
4146 // Can't use set_symbol_table because SymbolTable::cast knows that
4147 // SymbolTable is a singleton and checks for identity.
4148 roots_[kSymbolTableRootIndex] = new_table;
4149 ASSERT(symbol != NULL);
4150 return symbol;
4151}
4152
4153
John Reck59135872010-11-02 12:39:01 -07004154MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004155 if (string->IsSymbol()) return string;
4156 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004157 Object* new_table;
4158 { MaybeObject* maybe_new_table =
4159 symbol_table()->LookupString(string, &symbol);
4160 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4161 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004162 // Can't use set_symbol_table because SymbolTable::cast knows that
4163 // SymbolTable is a singleton and checks for identity.
4164 roots_[kSymbolTableRootIndex] = new_table;
4165 ASSERT(symbol != NULL);
4166 return symbol;
4167}
4168
4169
4170bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4171 if (string->IsSymbol()) {
4172 *symbol = string;
4173 return true;
4174 }
4175 return symbol_table()->LookupSymbolIfExists(string, symbol);
4176}
4177
4178
4179#ifdef DEBUG
4180void Heap::ZapFromSpace() {
Steve Block1e0659c2011-05-24 12:43:12 +01004181 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
Steve Blocka7e24c12009-10-30 11:49:00 +00004182 for (Address a = new_space_.FromSpaceLow();
4183 a < new_space_.FromSpaceHigh();
4184 a += kPointerSize) {
4185 Memory::Address_at(a) = kFromSpaceZapValue;
4186 }
4187}
4188#endif // DEBUG
4189
4190
Steve Block44f0eee2011-05-26 01:26:41 +01004191bool Heap::IteratePointersInDirtyRegion(Heap* heap,
4192 Address start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004193 Address end,
4194 ObjectSlotCallback copy_object_func) {
4195 Address slot_address = start;
4196 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004197
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004198 while (slot_address < end) {
4199 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004200 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004201 ASSERT((*slot)->IsHeapObject());
4202 copy_object_func(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004203 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004204 ASSERT((*slot)->IsHeapObject());
4205 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004206 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004207 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004208 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004209 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004210 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004211}
4212
4213
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004214// Compute the start address of the first map following the given addr.
4215static inline Address MapStartAlign(Address addr) {
4216 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4217 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4218}
Steve Blocka7e24c12009-10-30 11:49:00 +00004219
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004220
4221// Compute the end address of the first map preceding the given addr.
4222static inline Address MapEndAlign(Address addr) {
4223 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4224 return page + ((addr - page) / Map::kSize * Map::kSize);
4225}
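// Worked example for the two alignment helpers above. The value 88 used for
// Map::kSize is an assumption invented for the illustration, not the real
// constant. With the page's object area starting at P and addr = P + 100:
//
//   MapStartAlign(addr) == P + ((100 + 87) / 88) * 88 == P + 176
//   MapEndAlign(addr)   == P + ( 100       / 88) * 88 == P + 88
//
// In other words, MapStartAlign rounds addr up to a map-aligned address and
// MapEndAlign rounds it down, both relative to the page's object area start.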
4226
4227
4228static bool IteratePointersInDirtyMaps(Address start,
4229 Address end,
4230 ObjectSlotCallback copy_object_func) {
4231 ASSERT(MapStartAlign(start) == start);
4232 ASSERT(MapEndAlign(end) == end);
4233
4234 Address map_address = start;
4235 bool pointers_to_new_space_found = false;
4236
Steve Block44f0eee2011-05-26 01:26:41 +01004237 Heap* heap = HEAP;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004238 while (map_address < end) {
Steve Block44f0eee2011-05-26 01:26:41 +01004239 ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004240 ASSERT(Memory::Object_at(map_address)->IsMap());
4241
4242 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4243 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4244
Steve Block44f0eee2011-05-26 01:26:41 +01004245 if (Heap::IteratePointersInDirtyRegion(heap,
4246 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004247 pointer_fields_end,
4248 copy_object_func)) {
4249 pointers_to_new_space_found = true;
4250 }
4251
4252 map_address += Map::kSize;
4253 }
4254
4255 return pointers_to_new_space_found;
4256}
4257
4258
4259bool Heap::IteratePointersInDirtyMapsRegion(
Steve Block44f0eee2011-05-26 01:26:41 +01004260 Heap* heap,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004261 Address start,
4262 Address end,
4263 ObjectSlotCallback copy_object_func) {
4264 Address map_aligned_start = MapStartAlign(start);
4265 Address map_aligned_end = MapEndAlign(end);
4266
4267 bool contains_pointers_to_new_space = false;
4268
4269 if (map_aligned_start != start) {
4270 Address prev_map = map_aligned_start - Map::kSize;
4271 ASSERT(Memory::Object_at(prev_map)->IsMap());
4272
4273 Address pointer_fields_start =
4274 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4275
4276 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004277 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004278
4279 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004280 IteratePointersInDirtyRegion(heap,
4281 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004282 pointer_fields_end,
4283 copy_object_func)
4284 || contains_pointers_to_new_space;
4285 }
4286
4287 contains_pointers_to_new_space =
4288 IteratePointersInDirtyMaps(map_aligned_start,
4289 map_aligned_end,
4290 copy_object_func)
4291 || contains_pointers_to_new_space;
4292
4293 if (map_aligned_end != end) {
4294 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4295
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004296 Address pointer_fields_start =
4297 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004298
4299 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004300 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004301
4302 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004303 IteratePointersInDirtyRegion(heap,
4304 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004305 pointer_fields_end,
4306 copy_object_func)
4307 || contains_pointers_to_new_space;
4308 }
4309
4310 return contains_pointers_to_new_space;
4311}
4312
4313
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004314void Heap::IterateAndMarkPointersToFromSpace(Address start,
4315 Address end,
4316 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004317 Address slot_address = start;
4318 Page* page = Page::FromAddress(start);
4319
4320 uint32_t marks = page->GetRegionMarks();
4321
4322 while (slot_address < end) {
4323 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004324 if (InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004325 ASSERT((*slot)->IsHeapObject());
4326 callback(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004327 if (InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004328 ASSERT((*slot)->IsHeapObject());
4329 marks |= page->GetRegionMaskForAddress(slot_address);
4330 }
4331 }
4332 slot_address += kPointerSize;
4333 }
4334
4335 page->SetRegionMarks(marks);
4336}
4337
4338
4339uint32_t Heap::IterateDirtyRegions(
4340 uint32_t marks,
4341 Address area_start,
4342 Address area_end,
4343 DirtyRegionCallback visit_dirty_region,
4344 ObjectSlotCallback copy_object_func) {
4345 uint32_t newmarks = 0;
4346 uint32_t mask = 1;
4347
4348 if (area_start >= area_end) {
4349 return newmarks;
4350 }
4351
4352 Address region_start = area_start;
4353
4354  // area_start does not necessarily coincide with the start of the first
4355  // region. Thus, to calculate the beginning of the next region, we align
4356  // area_start + Page::kRegionSize down to a region boundary.
4357 Address second_region =
4358 reinterpret_cast<Address>(
4359 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4360 ~Page::kRegionAlignmentMask);
4361
4362 // Next region might be beyond area_end.
4363 Address region_end = Min(second_region, area_end);
4364
4365 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004366 if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004367 newmarks |= mask;
4368 }
4369 }
4370 mask <<= 1;
4371
4372 // Iterate subsequent regions which fully lay inside [area_start, area_end[.
4373 region_start = region_end;
4374 region_end = region_start + Page::kRegionSize;
4375
4376 while (region_end <= area_end) {
4377 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004378 if (visit_dirty_region(this,
4379 region_start,
4380 region_end,
4381 copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004382 newmarks |= mask;
4383 }
4384 }
4385
4386 region_start = region_end;
4387 region_end = region_start + Page::kRegionSize;
4388
4389 mask <<= 1;
4390 }
4391
4392 if (region_start != area_end) {
4393    // A small piece of the area is left un-iterated because area_end does
4394    // not coincide with a region end. Check whether the region covering the
4395    // last part of the area is dirty.
4396 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004397 if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004398 newmarks |= mask;
4399 }
4400 }
4401 }
4402
4403 return newmarks;
4404}
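// Sketch of the region walk above: the page area is covered by fixed-size
// regions of Page::kRegionSize bytes, and 'marks' carries one dirty bit per
// region, with bit 0 corresponding to the region containing area_start. For
// example, if three regions overlap [area_start, area_end[ and marks ends in
// binary 101, only the first and third regions are scanned, and a bit is
// carried into the returned mask only if the scanned region still holds
// pointers into new space after copy_object_func has run.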
4405
4406
4407
4408void Heap::IterateDirtyRegions(
4409 PagedSpace* space,
4410 DirtyRegionCallback visit_dirty_region,
4411 ObjectSlotCallback copy_object_func,
4412 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004413
4414 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004415
Steve Blocka7e24c12009-10-30 11:49:00 +00004416 while (it.has_next()) {
4417 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004418 uint32_t marks = page->GetRegionMarks();
4419
4420 if (marks != Page::kAllRegionsCleanMarks) {
4421 Address start = page->ObjectAreaStart();
4422
4423      // Do not try to visit pointers beyond the page allocation watermark.
4424      // The page can contain garbage pointers there.
4425 Address end;
4426
4427 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4428 page->IsWatermarkValid()) {
4429 end = page->AllocationWatermark();
4430 } else {
4431 end = page->CachedAllocationWatermark();
4432 }
4433
4434 ASSERT(space == old_pointer_space_ ||
4435 (space == map_space_ &&
4436 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4437
4438 page->SetRegionMarks(IterateDirtyRegions(marks,
4439 start,
4440 end,
4441 visit_dirty_region,
4442 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004443 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004444
4445 // Mark page watermark as invalid to maintain watermark validity invariant.
4446 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4447 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004448 }
4449}
4450
4451
Steve Blockd0582a62009-12-15 09:54:21 +00004452void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4453 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004454 IterateWeakRoots(v, mode);
4455}
4456
4457
4458void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004459 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004460 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004461 if (mode != VISIT_ALL_IN_SCAVENGE) {
4462 // Scavenge collections have special processing for this.
Steve Block44f0eee2011-05-26 01:26:41 +01004463 external_string_table_.Iterate(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004464 }
4465 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004466}
4467
4468
Steve Blockd0582a62009-12-15 09:54:21 +00004469void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004470 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004471 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004472
Iain Merrick75681382010-08-19 15:07:18 +01004473 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004474 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004475
Steve Block44f0eee2011-05-26 01:26:41 +01004476 isolate_->bootstrapper()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004477 v->Synchronize("bootstrapper");
Steve Block44f0eee2011-05-26 01:26:41 +01004478 isolate_->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004479 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004480 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004481 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004482
4483#ifdef ENABLE_DEBUGGER_SUPPORT
Steve Block44f0eee2011-05-26 01:26:41 +01004484 isolate_->debug()->Iterate(v);
Steve Blocka7e24c12009-10-30 11:49:00 +00004485#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004486 v->Synchronize("debug");
Steve Block44f0eee2011-05-26 01:26:41 +01004487 isolate_->compilation_cache()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004488 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004489
4490 // Iterate over local handles in handle scopes.
Steve Block44f0eee2011-05-26 01:26:41 +01004491 isolate_->handle_scope_implementer()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004492 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004493
Leon Clarkee46be812010-01-19 14:06:41 +00004494 // Iterate over the builtin code objects and code stubs in the
4495 // heap. Note that it is not necessary to iterate over code objects
4496 // on scavenge collections.
4497 if (mode != VISIT_ALL_IN_SCAVENGE) {
Steve Block44f0eee2011-05-26 01:26:41 +01004498 isolate_->builtins()->IterateBuiltins(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004499 }
Steve Blockd0582a62009-12-15 09:54:21 +00004500 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004501
4502 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004503 if (mode == VISIT_ONLY_STRONG) {
Steve Block44f0eee2011-05-26 01:26:41 +01004504 isolate_->global_handles()->IterateStrongRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004505 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01004506 isolate_->global_handles()->IterateAllRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004507 }
4508 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004509
4510 // Iterate over pointers being held by inactive threads.
Steve Block44f0eee2011-05-26 01:26:41 +01004511 isolate_->thread_manager()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004512 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004513
4514 // Iterate over the pointers the Serialization/Deserialization code is
4515 // holding.
4516 // During garbage collection this keeps the partial snapshot cache alive.
4517 // During deserialization of the startup snapshot this creates the partial
4518 // snapshot cache and deserializes the objects it refers to. During
4519 // serialization this does nothing, since the partial snapshot cache is
4520 // empty. However the next thing we do is create the partial snapshot,
4521 // filling up the partial snapshot cache with objects it needs as we go.
4522 SerializerDeserializer::Iterate(v);
4523 // We don't do a v->Synchronize call here, because in debug mode that will
4524 // output a flag to the snapshot. However at this point the serializer and
4525 // deserializer are deliberately a little unsynchronized (see above) so the
4526 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004527}
Steve Blocka7e24c12009-10-30 11:49:00 +00004528
4529
Steve Blocka7e24c12009-10-30 11:49:00 +00004530// TODO(1236194): Since the heap size is configurable on the command line
4531// and through the API, we should gracefully handle the case that the heap
4532// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004533bool Heap::ConfigureHeap(int max_semispace_size,
4534 int max_old_gen_size,
4535 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004536 if (HasBeenSetup()) return false;
4537
Steve Block3ce2e202009-11-05 08:53:23 +00004538 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4539
4540 if (Snapshot::IsEnabled()) {
4541 // If we are using a snapshot we always reserve the default amount
4542 // of memory for each semispace because code in the snapshot has
4543 // write-barrier code that relies on the size and alignment of new
4544 // space. We therefore cannot use a larger max semispace size
4545 // than the default reserved semispace size.
4546 if (max_semispace_size_ > reserved_semispace_size_) {
4547 max_semispace_size_ = reserved_semispace_size_;
4548 }
4549 } else {
4550 // If we are not using snapshots we reserve space for the actual
4551 // max semispace size.
4552 reserved_semispace_size_ = max_semispace_size_;
4553 }
4554
4555 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004556 if (max_executable_size > 0) {
4557 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4558 }
4559
4560 // The max executable size must be less than or equal to the max old
4561 // generation size.
4562 if (max_executable_size_ > max_old_generation_size_) {
4563 max_executable_size_ = max_old_generation_size_;
4564 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004565
4566 // The new space size must be a power of two to support single-bit testing
4567 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004568 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4569 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4570 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4571 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004572
4573 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004574 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004575
Steve Block44f0eee2011-05-26 01:26:41 +01004576 configured_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004577 return true;
4578}
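// Worked example for the size adjustments above (the 3 MB request is an
// illustrative number, not a default): asking for max_semispace_size = 3 MB
// rounds it up to the next power of two, 4 MB, and external_allocation_limit_
// then becomes 10 * 4 MB = 40 MB. The old generation limit is rounded up to a
// whole number of pages (Page::kPageSize), and the executable size limit is
// page-rounded as well and clamped so it never exceeds the old generation
// limit.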
4579
4580
4581bool Heap::ConfigureHeapDefault() {
Russell Brenner90bac252010-11-18 13:33:46 -08004582 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4583 FLAG_max_old_space_size * MB,
4584 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004585}
4586
4587
Ben Murdochbb769b22010-08-11 14:56:33 +01004588void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004589 *stats->start_marker = HeapStats::kStartMarker;
4590 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004591 *stats->new_space_size = new_space_.SizeAsInt();
4592 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004593 *stats->old_pointer_space_size = old_pointer_space_->Size();
4594 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4595 *stats->old_data_space_size = old_data_space_->Size();
4596 *stats->old_data_space_capacity = old_data_space_->Capacity();
4597 *stats->code_space_size = code_space_->Size();
4598 *stats->code_space_capacity = code_space_->Capacity();
4599 *stats->map_space_size = map_space_->Size();
4600 *stats->map_space_capacity = map_space_->Capacity();
4601 *stats->cell_space_size = cell_space_->Size();
4602 *stats->cell_space_capacity = cell_space_->Capacity();
4603 *stats->lo_space_size = lo_space_->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01004604 isolate_->global_handles()->RecordStats(stats);
4605 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
Ben Murdochbb769b22010-08-11 14:56:33 +01004606 *stats->memory_allocator_capacity =
Steve Block44f0eee2011-05-26 01:26:41 +01004607 isolate()->memory_allocator()->Size() +
4608 isolate()->memory_allocator()->Available();
Iain Merrick75681382010-08-19 15:07:18 +01004609 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004611 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01004612 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01004613 for (HeapObject* obj = iterator.next();
4614 obj != NULL;
4615 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01004616 InstanceType type = obj->map()->instance_type();
4617 ASSERT(0 <= type && type <= LAST_TYPE);
4618 stats->objects_per_type[type]++;
4619 stats->size_per_type[type] += obj->Size();
4620 }
4621 }
Steve Blockd0582a62009-12-15 09:54:21 +00004622}
4623
4624
Ben Murdochf87a2032010-10-22 12:50:53 +01004625intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004626 return old_pointer_space_->Size()
4627 + old_data_space_->Size()
4628 + code_space_->Size()
4629 + map_space_->Size()
4630 + cell_space_->Size()
4631 + lo_space_->Size();
4632}
4633
4634
4635int Heap::PromotedExternalMemorySize() {
4636 if (amount_of_external_allocated_memory_
4637 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4638 return amount_of_external_allocated_memory_
4639 - amount_of_external_allocated_memory_at_last_global_gc_;
4640}
4641
Steve Block44f0eee2011-05-26 01:26:41 +01004642#ifdef DEBUG
4643
4644// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
4645static const int kMarkTag = 2;
4646
4647
4648class HeapDebugUtils {
4649 public:
4650 explicit HeapDebugUtils(Heap* heap)
4651 : search_for_any_global_(false),
4652 search_target_(NULL),
4653 found_target_(false),
4654 object_stack_(20),
4655 heap_(heap) {
4656 }
4657
4658 class MarkObjectVisitor : public ObjectVisitor {
4659 public:
4660 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4661
4662 void VisitPointers(Object** start, Object** end) {
4663 // Copy all HeapObject pointers in [start, end)
4664 for (Object** p = start; p < end; p++) {
4665 if ((*p)->IsHeapObject())
4666 utils_->MarkObjectRecursively(p);
4667 }
4668 }
4669
4670 HeapDebugUtils* utils_;
4671 };
4672
4673 void MarkObjectRecursively(Object** p) {
4674 if (!(*p)->IsHeapObject()) return;
4675
4676 HeapObject* obj = HeapObject::cast(*p);
4677
4678 Object* map = obj->map();
4679
4680 if (!map->IsHeapObject()) return; // visited before
4681
4682 if (found_target_) return; // stop if target found
4683 object_stack_.Add(obj);
4684 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
4685 (!search_for_any_global_ && (obj == search_target_))) {
4686 found_target_ = true;
4687 return;
4688 }
4689
4690 // not visited yet
4691 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
4692
4693 Address map_addr = map_p->address();
4694
4695 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
4696
4697 MarkObjectRecursively(&map);
4698
4699 MarkObjectVisitor mark_visitor(this);
4700
4701 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
4702 &mark_visitor);
4703
4704 if (!found_target_) // don't pop if found the target
4705 object_stack_.RemoveLast();
4706 }
4707
4708
4709 class UnmarkObjectVisitor : public ObjectVisitor {
4710 public:
4711 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4712
4713 void VisitPointers(Object** start, Object** end) {
4714 // Copy all HeapObject pointers in [start, end)
4715 for (Object** p = start; p < end; p++) {
4716 if ((*p)->IsHeapObject())
4717 utils_->UnmarkObjectRecursively(p);
4718 }
4719 }
4720
4721 HeapDebugUtils* utils_;
4722 };
4723
4724
4725 void UnmarkObjectRecursively(Object** p) {
4726 if (!(*p)->IsHeapObject()) return;
4727
4728 HeapObject* obj = HeapObject::cast(*p);
4729
4730 Object* map = obj->map();
4731
4732 if (map->IsHeapObject()) return; // unmarked already
4733
4734 Address map_addr = reinterpret_cast<Address>(map);
4735
4736 map_addr -= kMarkTag;
4737
4738 ASSERT_TAG_ALIGNED(map_addr);
4739
4740 HeapObject* map_p = HeapObject::FromAddress(map_addr);
4741
4742 obj->set_map(reinterpret_cast<Map*>(map_p));
4743
4744 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
4745
4746 UnmarkObjectVisitor unmark_visitor(this);
4747
4748 obj->IterateBody(Map::cast(map_p)->instance_type(),
4749 obj->SizeFromMap(Map::cast(map_p)),
4750 &unmark_visitor);
4751 }
4752
4753
4754 void MarkRootObjectRecursively(Object** root) {
4755 if (search_for_any_global_) {
4756 ASSERT(search_target_ == NULL);
4757 } else {
4758 ASSERT(search_target_->IsHeapObject());
4759 }
4760 found_target_ = false;
4761 object_stack_.Clear();
4762
4763 MarkObjectRecursively(root);
4764 UnmarkObjectRecursively(root);
4765
4766 if (found_target_) {
4767 PrintF("=====================================\n");
4768 PrintF("==== Path to object ====\n");
4769 PrintF("=====================================\n\n");
4770
4771 ASSERT(!object_stack_.is_empty());
4772 for (int i = 0; i < object_stack_.length(); i++) {
4773 if (i > 0) PrintF("\n |\n |\n V\n\n");
4774 Object* obj = object_stack_[i];
4775 obj->Print();
4776 }
4777 PrintF("=====================================\n");
4778 }
4779 }
4780
4781 // Helper class for visiting HeapObjects recursively.
4782 class MarkRootVisitor: public ObjectVisitor {
4783 public:
4784 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4785
4786 void VisitPointers(Object** start, Object** end) {
4787 // Visit all HeapObject pointers in [start, end)
4788 for (Object** p = start; p < end; p++) {
4789 if ((*p)->IsHeapObject())
4790 utils_->MarkRootObjectRecursively(p);
4791 }
4792 }
4793
4794 HeapDebugUtils* utils_;
4795 };
4796
4797 bool search_for_any_global_;
4798 Object* search_target_;
4799 bool found_target_;
4800 List<Object*> object_stack_;
4801 Heap* heap_;
4802
4803 friend class Heap;
4804};
4805
4806#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00004807
4808bool Heap::Setup(bool create_heap_objects) {
Steve Block44f0eee2011-05-26 01:26:41 +01004809#ifdef DEBUG
4810 debug_utils_ = new HeapDebugUtils(this);
4811#endif
4812
Steve Blocka7e24c12009-10-30 11:49:00 +00004813 // Initialize heap spaces and initial maps and objects. Whenever something
4814 // goes wrong, just return false. The caller should check the results and
4815 // call Heap::TearDown() to release allocated memory.
4816 //
4817  // If the heap is not yet configured (e.g., through the API), configure it.
4818 // Configuration is based on the flags new-space-size (really the semispace
4819 // size) and old-space-size if set or the initial values of semispace_size_
4820 // and old_generation_size_ otherwise.
Steve Block44f0eee2011-05-26 01:26:41 +01004821 if (!configured_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004822 if (!ConfigureHeapDefault()) return false;
4823 }
4824
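  // The scavenging visitor tables and the collectors are initialized only once
  // per process: the static flag is guarded by a mutex so that concurrently
  // created isolates do not race on this one-time setup.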
Steve Block44f0eee2011-05-26 01:26:41 +01004825 gc_initializer_mutex->Lock();
4826 static bool initialized_gc = false;
4827 if (!initialized_gc) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01004828 initialized_gc = true;
4829 InitializeScavengingVisitorsTables();
4830 NewSpaceScavenger::Initialize();
4831 MarkCompactCollector::Initialize();
Steve Block44f0eee2011-05-26 01:26:41 +01004832 }
4833 gc_initializer_mutex->Unlock();
Iain Merrick75681382010-08-19 15:07:18 +01004834
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004835 MarkMapPointersAsEncoded(false);
4836
Steve Blocka7e24c12009-10-30 11:49:00 +00004837 // Setup memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004838 // space. The chunk is double the size of the requested reserved
4839 // new space size to ensure that we can find a pair of semispaces that
4840 // are contiguous and aligned to their size.
Steve Block44f0eee2011-05-26 01:26:41 +01004841 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
4842 return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004843 void* chunk =
Steve Block44f0eee2011-05-26 01:26:41 +01004844 isolate_->memory_allocator()->ReserveInitialChunk(
4845 4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004846 if (chunk == NULL) return false;
4847
4848 // Align the pair of semispaces to their size, which must be a power
4849 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004850 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004851 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4852 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4853 return false;
4854 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004855
4856 // Initialize old pointer space.
4857 old_pointer_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004858 new OldSpace(this,
4859 max_old_generation_size_,
4860 OLD_POINTER_SPACE,
4861 NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004862 if (old_pointer_space_ == NULL) return false;
4863 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4864
4865 // Initialize old data space.
4866 old_data_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004867 new OldSpace(this,
4868 max_old_generation_size_,
4869 OLD_DATA_SPACE,
4870 NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004871 if (old_data_space_ == NULL) return false;
4872 if (!old_data_space_->Setup(NULL, 0)) return false;
4873
4874 // Initialize the code space, set its maximum capacity to the old
4875 // generation size. It needs executable memory.
4876 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4877 // virtual address space, so that they can call each other with near calls.
4878 if (code_range_size_ > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01004879 if (!isolate_->code_range()->Setup(code_range_size_)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004880 return false;
4881 }
4882 }
4883
4884 code_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004885 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004886 if (code_space_ == NULL) return false;
4887 if (!code_space_->Setup(NULL, 0)) return false;
4888
4889 // Initialize map space.
Steve Block44f0eee2011-05-26 01:26:41 +01004890 map_space_ = new MapSpace(this, FLAG_use_big_map_space
Leon Clarkee46be812010-01-19 14:06:41 +00004891 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004892 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4893 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004894 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004895 if (map_space_ == NULL) return false;
4896 if (!map_space_->Setup(NULL, 0)) return false;
4897
4898 // Initialize global property cell space.
Steve Block44f0eee2011-05-26 01:26:41 +01004899 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004900 if (cell_space_ == NULL) return false;
4901 if (!cell_space_->Setup(NULL, 0)) return false;
4902
4903  // The large object space may contain code or data. We set the memory
4904 // to be non-executable here for safety, but this means we need to enable it
4905 // explicitly when allocating large code objects.
Steve Block44f0eee2011-05-26 01:26:41 +01004906 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004907 if (lo_space_ == NULL) return false;
4908 if (!lo_space_->Setup()) return false;
4909
4910 if (create_heap_objects) {
4911 // Create initial maps.
4912 if (!CreateInitialMaps()) return false;
4913 if (!CreateApiObjects()) return false;
4914
4915 // Create initial objects
4916 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004917
4918 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004919 }
4920
Steve Block44f0eee2011-05-26 01:26:41 +01004921 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
4922 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004923
Steve Block3ce2e202009-11-05 08:53:23 +00004924#ifdef ENABLE_LOGGING_AND_PROFILING
4925 // This should be called only after initial objects have been created.
Steve Block44f0eee2011-05-26 01:26:41 +01004926 isolate_->producer_heap_profile()->Setup();
Steve Block3ce2e202009-11-05 08:53:23 +00004927#endif
4928
Steve Blocka7e24c12009-10-30 11:49:00 +00004929 return true;
4930}
4931
4932
Steve Blockd0582a62009-12-15 09:54:21 +00004933void Heap::SetStackLimits() {
Steve Block44f0eee2011-05-26 01:26:41 +01004934 ASSERT(isolate_ != NULL);
4935 ASSERT(isolate_ == isolate());
Steve Blocka7e24c12009-10-30 11:49:00 +00004936 // On 64 bit machines, pointers are generally out of range of Smis. We write
4937 // something that looks like an out of range Smi to the GC.
4938
Steve Blockd0582a62009-12-15 09:54:21 +00004939 // Set up the special root array entries containing the stack limits.
4940 // These are actually addresses, but the tag makes the GC ignore it.
Steve Blocka7e24c12009-10-30 11:49:00 +00004941 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00004942 reinterpret_cast<Object*>(
Steve Block44f0eee2011-05-26 01:26:41 +01004943 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blockd0582a62009-12-15 09:54:21 +00004944 roots_[kRealStackLimitRootIndex] =
4945 reinterpret_cast<Object*>(
Steve Block44f0eee2011-05-26 01:26:41 +01004946 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00004947}
4948
4949
4950void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01004951 if (FLAG_print_cumulative_gc_stat) {
4952 PrintF("\n\n");
4953 PrintF("gc_count=%d ", gc_count_);
4954 PrintF("mark_sweep_count=%d ", ms_count_);
4955 PrintF("mark_compact_count=%d ", mc_count_);
Steve Block44f0eee2011-05-26 01:26:41 +01004956 PrintF("max_gc_pause=%d ", get_max_gc_pause());
4957 PrintF("min_in_mutator=%d ", get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01004958 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
Steve Block44f0eee2011-05-26 01:26:41 +01004959 get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01004960 PrintF("\n\n");
4961 }
4962
Steve Block44f0eee2011-05-26 01:26:41 +01004963 isolate_->global_handles()->TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00004964
Steve Block44f0eee2011-05-26 01:26:41 +01004965 external_string_table_.TearDown();
Leon Clarkee46be812010-01-19 14:06:41 +00004966
Steve Blocka7e24c12009-10-30 11:49:00 +00004967 new_space_.TearDown();
4968
4969 if (old_pointer_space_ != NULL) {
4970 old_pointer_space_->TearDown();
4971 delete old_pointer_space_;
4972 old_pointer_space_ = NULL;
4973 }
4974
4975 if (old_data_space_ != NULL) {
4976 old_data_space_->TearDown();
4977 delete old_data_space_;
4978 old_data_space_ = NULL;
4979 }
4980
4981 if (code_space_ != NULL) {
4982 code_space_->TearDown();
4983 delete code_space_;
4984 code_space_ = NULL;
4985 }
4986
4987 if (map_space_ != NULL) {
4988 map_space_->TearDown();
4989 delete map_space_;
4990 map_space_ = NULL;
4991 }
4992
4993 if (cell_space_ != NULL) {
4994 cell_space_->TearDown();
4995 delete cell_space_;
4996 cell_space_ = NULL;
4997 }
4998
4999 if (lo_space_ != NULL) {
5000 lo_space_->TearDown();
5001 delete lo_space_;
5002 lo_space_ = NULL;
5003 }
5004
Steve Block44f0eee2011-05-26 01:26:41 +01005005 isolate_->memory_allocator()->TearDown();
5006
5007#ifdef DEBUG
5008 delete debug_utils_;
5009 debug_utils_ = NULL;
5010#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005011}
5012
5013
5014void Heap::Shrink() {
5015 // Try to shrink all paged spaces.
5016 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005017 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
5018 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00005019}
5020
5021
5022#ifdef ENABLE_HEAP_PROTECTION
5023
5024void Heap::Protect() {
5025 if (HasBeenSetup()) {
5026 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005027 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5028 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00005029 }
5030}
5031
5032
5033void Heap::Unprotect() {
5034 if (HasBeenSetup()) {
5035 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005036 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5037 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00005038 }
5039}
5040
5041#endif
5042
5043
Steve Block6ded16b2010-05-10 14:33:55 +01005044void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
5045 ASSERT(callback != NULL);
5046 GCPrologueCallbackPair pair(callback, gc_type);
5047 ASSERT(!gc_prologue_callbacks_.Contains(pair));
5048 return gc_prologue_callbacks_.Add(pair);
5049}
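
// A minimal usage sketch for AddGCPrologueCallback (the callback name is
// hypothetical; GCType/GCCallbackFlags come from the public v8.h API):
//
//   static void OnGCStart(GCType type, GCCallbackFlags flags) {
//     // inspect or log the upcoming collection
//   }
//   ...
//   heap->AddGCPrologueCallback(OnGCStart, kGCTypeAll);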
5050
5051
5052void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
5053 ASSERT(callback != NULL);
5054 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5055 if (gc_prologue_callbacks_[i].callback == callback) {
5056 gc_prologue_callbacks_.Remove(i);
5057 return;
5058 }
5059 }
5060 UNREACHABLE();
5061}
5062
5063
5064void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
5065 ASSERT(callback != NULL);
5066 GCEpilogueCallbackPair pair(callback, gc_type);
5067 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
5068 return gc_epilogue_callbacks_.Add(pair);
5069}
5070
5071
5072void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
5073 ASSERT(callback != NULL);
5074 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5075 if (gc_epilogue_callbacks_[i].callback == callback) {
5076 gc_epilogue_callbacks_.Remove(i);
5077 return;
5078 }
5079 }
5080 UNREACHABLE();
5081}
5082
5083
Steve Blocka7e24c12009-10-30 11:49:00 +00005084#ifdef DEBUG
5085
5086class PrintHandleVisitor: public ObjectVisitor {
5087 public:
5088 void VisitPointers(Object** start, Object** end) {
5089 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01005090 PrintF(" handle %p to %p\n",
5091 reinterpret_cast<void*>(p),
5092 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00005093 }
5094};
5095
5096void Heap::PrintHandles() {
5097 PrintF("Handles:\n");
5098 PrintHandleVisitor v;
Steve Block44f0eee2011-05-26 01:26:41 +01005099 isolate_->handle_scope_implementer()->Iterate(&v);
Steve Blocka7e24c12009-10-30 11:49:00 +00005100}
5101
5102#endif
5103
5104
5105Space* AllSpaces::next() {
5106 switch (counter_++) {
5107 case NEW_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005108 return HEAP->new_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005109 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005110 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005111 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005112 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005113 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005114 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005115 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005116 return HEAP->map_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005117 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005118 return HEAP->cell_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005119 case LO_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005120 return HEAP->lo_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005121 default:
5122 return NULL;
5123 }
5124}
5125
5126
5127PagedSpace* PagedSpaces::next() {
5128 switch (counter_++) {
5129 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005130 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005131 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005132 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005133 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005134 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005135 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005136 return HEAP->map_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005137 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005138 return HEAP->cell_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005139 default:
5140 return NULL;
5141 }
5142}
5143
5144
5145
5146OldSpace* OldSpaces::next() {
5147 switch (counter_++) {
5148 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005149 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005150 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005151 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005152 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005153 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005154 default:
5155 return NULL;
5156 }
5157}
5158
5159
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005160SpaceIterator::SpaceIterator()
5161 : current_space_(FIRST_SPACE),
5162 iterator_(NULL),
5163 size_func_(NULL) {
5164}
5165
5166
5167SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
5168 : current_space_(FIRST_SPACE),
5169 iterator_(NULL),
5170 size_func_(size_func) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005171}
5172
5173
5174SpaceIterator::~SpaceIterator() {
5175 // Delete active iterator if any.
5176 delete iterator_;
5177}
5178
5179
5180bool SpaceIterator::has_next() {
5181 // Iterate until no more spaces.
5182 return current_space_ != LAST_SPACE;
5183}
5184
5185
5186ObjectIterator* SpaceIterator::next() {
5187 if (iterator_ != NULL) {
5188 delete iterator_;
5189 iterator_ = NULL;
5190 // Move to the next space
5191 current_space_++;
5192 if (current_space_ > LAST_SPACE) {
5193 return NULL;
5194 }
5195 }
5196
5197 // Return iterator for the new current space.
5198 return CreateIterator();
5199}
5200
5201
5202// Create an iterator for the space to iterate.
5203ObjectIterator* SpaceIterator::CreateIterator() {
5204 ASSERT(iterator_ == NULL);
5205
5206 switch (current_space_) {
5207 case NEW_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005208 iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005209 break;
5210 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005211 iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005212 break;
5213 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005214 iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005215 break;
5216 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005217 iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005218 break;
5219 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005220 iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005221 break;
5222 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005223 iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005224 break;
5225 case LO_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005226 iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005227 break;
5228 }
5229
5230  // Return the newly allocated iterator.
5231 ASSERT(iterator_ != NULL);
5232 return iterator_;
5233}
5234
5235
Ben Murdochb0fe1622011-05-05 13:52:32 +01005236class HeapObjectsFilter {
5237 public:
5238 virtual ~HeapObjectsFilter() {}
5239 virtual bool SkipObject(HeapObject* object) = 0;
5240};
5241
5242
5243class FreeListNodesFilter : public HeapObjectsFilter {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005244 public:
5245 FreeListNodesFilter() {
5246 MarkFreeListNodes();
5247 }
5248
Ben Murdochb0fe1622011-05-05 13:52:32 +01005249 bool SkipObject(HeapObject* object) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005250 if (object->IsMarked()) {
5251 object->ClearMark();
5252 return true;
5253 } else {
5254 return false;
5255 }
5256 }
5257
5258 private:
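  // Sets the mark bit on every free-list node in the paged spaces; SkipObject()
  // above then filters those nodes out, clearing the mark again as it goes.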
5259 void MarkFreeListNodes() {
Steve Block44f0eee2011-05-26 01:26:41 +01005260 Heap* heap = HEAP;
5261 heap->old_pointer_space()->MarkFreeListNodes();
5262 heap->old_data_space()->MarkFreeListNodes();
5263 MarkCodeSpaceFreeListNodes(heap);
5264 heap->map_space()->MarkFreeListNodes();
5265 heap->cell_space()->MarkFreeListNodes();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005266 }
5267
Steve Block44f0eee2011-05-26 01:26:41 +01005268 void MarkCodeSpaceFreeListNodes(Heap* heap) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005269 // For code space, using FreeListNode::IsFreeListNode is OK.
Steve Block44f0eee2011-05-26 01:26:41 +01005270 HeapObjectIterator iter(heap->code_space());
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005271 for (HeapObject* obj = iter.next_object();
5272 obj != NULL;
5273 obj = iter.next_object()) {
5274 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
5275 }
5276 }
5277
5278 AssertNoAllocation no_alloc;
5279};
5280
5281
Ben Murdochb0fe1622011-05-05 13:52:32 +01005282class UnreachableObjectsFilter : public HeapObjectsFilter {
5283 public:
5284 UnreachableObjectsFilter() {
5285 MarkUnreachableObjects();
5286 }
5287
5288 bool SkipObject(HeapObject* object) {
5289 if (object->IsMarked()) {
5290 object->ClearMark();
5291 return true;
5292 } else {
5293 return false;
5294 }
5295 }
5296
5297 private:
5298 class UnmarkingVisitor : public ObjectVisitor {
5299 public:
5300 UnmarkingVisitor() : list_(10) {}
5301
5302 void VisitPointers(Object** start, Object** end) {
5303 for (Object** p = start; p < end; p++) {
5304 if (!(*p)->IsHeapObject()) continue;
5305 HeapObject* obj = HeapObject::cast(*p);
5306 if (obj->IsMarked()) {
5307 obj->ClearMark();
5308 list_.Add(obj);
5309 }
5310 }
5311 }
5312
5313 bool can_process() { return !list_.is_empty(); }
5314
5315 void ProcessNext() {
5316 HeapObject* obj = list_.RemoveLast();
5317 obj->Iterate(this);
5318 }
5319
5320 private:
5321 List<HeapObject*> list_;
5322 };
5323
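  // Mark every object in the heap, then clear the marks transitively starting
  // from the roots. Whatever is still marked afterwards is unreachable and is
  // skipped (and unmarked) by SkipObject().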
5324 void MarkUnreachableObjects() {
5325 HeapIterator iterator;
5326 for (HeapObject* obj = iterator.next();
5327 obj != NULL;
5328 obj = iterator.next()) {
5329 obj->SetMark();
5330 }
5331 UnmarkingVisitor visitor;
Steve Block44f0eee2011-05-26 01:26:41 +01005332 HEAP->IterateRoots(&visitor, VISIT_ALL);
Ben Murdochb0fe1622011-05-05 13:52:32 +01005333 while (visitor.can_process())
5334 visitor.ProcessNext();
5335 }
5336
5337 AssertNoAllocation no_alloc;
5338};
5339
5340
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005341HeapIterator::HeapIterator()
5342 : filtering_(HeapIterator::kNoFiltering),
5343 filter_(NULL) {
5344 Init();
5345}
5346
5347
Ben Murdochb0fe1622011-05-05 13:52:32 +01005348HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005349 : filtering_(filtering),
5350 filter_(NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005351 Init();
5352}
5353
5354
5355HeapIterator::~HeapIterator() {
5356 Shutdown();
5357}
5358
5359
5360void HeapIterator::Init() {
5361 // Start the iteration.
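  // When a filter is active, some objects have a mark set in their map word,
  // so sizes are computed with MarkCompactCollector::SizeOfMarkedObject
  // instead of the plain size lookup.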
Ben Murdochb0fe1622011-05-05 13:52:32 +01005362 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5363 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
5364 switch (filtering_) {
5365 case kFilterFreeListNodes:
5366 filter_ = new FreeListNodesFilter;
5367 break;
5368 case kFilterUnreachable:
5369 filter_ = new UnreachableObjectsFilter;
5370 break;
5371 default:
5372 break;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005373 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005374 object_iterator_ = space_iterator_->next();
5375}
5376
5377
5378void HeapIterator::Shutdown() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005379#ifdef DEBUG
Ben Murdochb0fe1622011-05-05 13:52:32 +01005380 // Assert that in filtering mode we have iterated through all
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005381 // objects. Otherwise, heap will be left in an inconsistent state.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005382 if (filtering_ != kNoFiltering) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005383 ASSERT(object_iterator_ == NULL);
5384 }
5385#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005386 // Make sure the last iterator is deallocated.
5387 delete space_iterator_;
5388 space_iterator_ = NULL;
5389 object_iterator_ = NULL;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005390 delete filter_;
5391 filter_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005392}
5393
5394
Leon Clarked91b9f72010-01-27 17:25:45 +00005395HeapObject* HeapIterator::next() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005396 if (filter_ == NULL) return NextObject();
5397
5398 HeapObject* obj = NextObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01005399 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005400 return obj;
5401}
5402
5403
5404HeapObject* HeapIterator::NextObject() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005405 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00005406 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005407
Leon Clarked91b9f72010-01-27 17:25:45 +00005408 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005409 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00005410 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005411 } else {
5412    // Go through the spaces looking for one that has objects.
5413 while (space_iterator_->has_next()) {
5414 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00005415 if (HeapObject* obj = object_iterator_->next_object()) {
5416 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005417 }
5418 }
5419 }
5420 // Done with the last space.
5421 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00005422 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005423}
5424
5425
5426void HeapIterator::reset() {
5427 // Restart the iterator.
5428 Shutdown();
5429 Init();
5430}
5431
5432
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005433#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
Steve Blocka7e24c12009-10-30 11:49:00 +00005434
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005435Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00005436
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005437class PathTracer::MarkVisitor: public ObjectVisitor {
Steve Blocka7e24c12009-10-30 11:49:00 +00005438 public:
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005439 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00005440 void VisitPointers(Object** start, Object** end) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005441 // Scan all HeapObject pointers in [start, end)
5442 for (Object** p = start; !tracer_->found() && (p < end); p++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005443 if ((*p)->IsHeapObject())
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005444 tracer_->MarkRecursively(p, this);
Steve Blocka7e24c12009-10-30 11:49:00 +00005445 }
5446 }
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005447
5448 private:
5449 PathTracer* tracer_;
Steve Blocka7e24c12009-10-30 11:49:00 +00005450};
5451
Steve Blocka7e24c12009-10-30 11:49:00 +00005452
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005453class PathTracer::UnmarkVisitor: public ObjectVisitor {
5454 public:
5455 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5456 void VisitPointers(Object** start, Object** end) {
5457 // Scan all HeapObject pointers in [start, end)
5458 for (Object** p = start; p < end; p++) {
5459 if ((*p)->IsHeapObject())
5460 tracer_->UnmarkRecursively(p, this);
5461 }
5462 }
5463
5464 private:
5465 PathTracer* tracer_;
5466};
5467
5468
5469void PathTracer::VisitPointers(Object** start, Object** end) {
5470 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5471 // Visit all HeapObject pointers in [start, end)
5472 for (Object** p = start; !done && (p < end); p++) {
5473 if ((*p)->IsHeapObject()) {
5474 TracePathFrom(p);
5475 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5476 }
5477 }
5478}
5479
5480
5481void PathTracer::Reset() {
5482 found_target_ = false;
5483 object_stack_.Clear();
5484}
5485
5486
5487void PathTracer::TracePathFrom(Object** root) {
5488 ASSERT((search_target_ == kAnyGlobalObject) ||
5489 search_target_->IsHeapObject());
5490 found_target_in_trace_ = false;
5491 object_stack_.Clear();
5492
5493 MarkVisitor mark_visitor(this);
5494 MarkRecursively(root, &mark_visitor);
5495
5496 UnmarkVisitor unmark_visitor(this);
5497 UnmarkRecursively(root, &unmark_visitor);
5498
5499 ProcessResults();
5500}
5501
5502
5503void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005504 if (!(*p)->IsHeapObject()) return;
5505
5506 HeapObject* obj = HeapObject::cast(*p);
5507
5508 Object* map = obj->map();
5509
5510 if (!map->IsHeapObject()) return; // visited before
5511
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005512 if (found_target_in_trace_) return; // stop if target found
5513 object_stack_.Add(obj);
5514 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5515 (obj == search_target_)) {
5516 found_target_in_trace_ = true;
5517 found_target_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00005518 return;
5519 }
5520
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005521 bool is_global_context = obj->IsGlobalContext();
5522
Steve Blocka7e24c12009-10-30 11:49:00 +00005523 // not visited yet
5524 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5525
5526 Address map_addr = map_p->address();
5527
5528 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5529
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005530 // Scan the object body.
5531 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5532    // This is specialized to scan Contexts properly: only the strong slots are visited here.
5533 Object** start = reinterpret_cast<Object**>(obj->address() +
5534 Context::kHeaderSize);
5535 Object** end = reinterpret_cast<Object**>(obj->address() +
5536 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
5537 mark_visitor->VisitPointers(start, end);
5538 } else {
5539 obj->IterateBody(map_p->instance_type(),
5540 obj->SizeFromMap(map_p),
5541 mark_visitor);
5542 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005543
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005544 // Scan the map after the body because the body is a lot more interesting
5545 // when doing leak detection.
5546 MarkRecursively(&map, mark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005547
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005548 if (!found_target_in_trace_) // don't pop if found the target
5549 object_stack_.RemoveLast();
Steve Blocka7e24c12009-10-30 11:49:00 +00005550}
5551
5552
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005553void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005554 if (!(*p)->IsHeapObject()) return;
5555
5556 HeapObject* obj = HeapObject::cast(*p);
5557
5558 Object* map = obj->map();
5559
5560 if (map->IsHeapObject()) return; // unmarked already
5561
5562 Address map_addr = reinterpret_cast<Address>(map);
5563
5564 map_addr -= kMarkTag;
5565
5566 ASSERT_TAG_ALIGNED(map_addr);
5567
5568 HeapObject* map_p = HeapObject::FromAddress(map_addr);
5569
5570 obj->set_map(reinterpret_cast<Map*>(map_p));
5571
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005572 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005573
5574 obj->IterateBody(Map::cast(map_p)->instance_type(),
5575 obj->SizeFromMap(Map::cast(map_p)),
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005576 unmark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005577}
5578
5579
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005580void PathTracer::ProcessResults() {
5581 if (found_target_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005582 PrintF("=====================================\n");
5583 PrintF("==== Path to object ====\n");
5584 PrintF("=====================================\n\n");
5585
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005586 ASSERT(!object_stack_.is_empty());
5587 for (int i = 0; i < object_stack_.length(); i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005588 if (i > 0) PrintF("\n |\n |\n V\n\n");
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005589 Object* obj = object_stack_[i];
5590#ifdef OBJECT_PRINT
Steve Blocka7e24c12009-10-30 11:49:00 +00005591 obj->Print();
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005592#else
5593 obj->ShortPrint();
5594#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005595 }
5596 PrintF("=====================================\n");
5597 }
5598}
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005599#endif // DEBUG || LIVE_OBJECT_LIST
Steve Blocka7e24c12009-10-30 11:49:00 +00005600
5601
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005602#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00005603// Triggers a depth-first traversal of reachable objects from roots
5604// and finds a path to a specific heap object and prints it.
Leon Clarkee46be812010-01-19 14:06:41 +00005605void Heap::TracePathToObject(Object* target) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005606 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5607 IterateRoots(&tracer, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005608}
5609
5610
5611// Triggers a depth-first traversal of reachable objects from roots
5612// and finds a path to any global object and prints it. Useful for
5613// determining the source for leaks of global objects.
5614void Heap::TracePathToGlobal() {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005615 PathTracer tracer(PathTracer::kAnyGlobalObject,
5616 PathTracer::FIND_ALL,
5617 VISIT_ALL);
5618 IterateRoots(&tracer, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005619}
5620#endif
5621
5622
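// Sums the wasted and available free bytes across all old spaces; GCTracer
// uses this to report the hole size before and after a collection.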
Ben Murdochf87a2032010-10-22 12:50:53 +01005623static intptr_t CountTotalHolesSize() {
5624 intptr_t holes_size = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +01005625 OldSpaces spaces;
5626 for (OldSpace* space = spaces.next();
5627 space != NULL;
5628 space = spaces.next()) {
5629 holes_size += space->Waste() + space->AvailableFree();
5630 }
5631 return holes_size;
5632}
5633
5634
Steve Block44f0eee2011-05-26 01:26:41 +01005635GCTracer::GCTracer(Heap* heap)
Steve Blocka7e24c12009-10-30 11:49:00 +00005636 : start_time_(0.0),
Leon Clarkef7060e22010-06-03 12:02:55 +01005637 start_size_(0),
Steve Blocka7e24c12009-10-30 11:49:00 +00005638 gc_count_(0),
5639 full_gc_count_(0),
5640 is_compacting_(false),
Leon Clarkef7060e22010-06-03 12:02:55 +01005641 marked_count_(0),
5642 allocated_since_last_gc_(0),
5643 spent_in_mutator_(0),
Steve Block44f0eee2011-05-26 01:26:41 +01005644 promoted_objects_size_(0),
5645 heap_(heap) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005646 // These two fields reflect the state of the previous full collection.
5647 // Set them before they are changed by the collector.
Steve Block44f0eee2011-05-26 01:26:41 +01005648 previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
5649 previous_marked_count_ =
5650 heap_->mark_compact_collector_.previous_marked_count();
Leon Clarkef7060e22010-06-03 12:02:55 +01005651 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00005652 start_time_ = OS::TimeCurrentMillis();
Steve Block44f0eee2011-05-26 01:26:41 +01005653 start_size_ = heap_->SizeOfObjects();
Leon Clarkef7060e22010-06-03 12:02:55 +01005654
5655 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
5656 scopes_[i] = 0;
5657 }
5658
5659 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
5660
Steve Block44f0eee2011-05-26 01:26:41 +01005661 allocated_since_last_gc_ =
5662 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
Leon Clarkef7060e22010-06-03 12:02:55 +01005663
Steve Block44f0eee2011-05-26 01:26:41 +01005664 if (heap_->last_gc_end_timestamp_ > 0) {
5665 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005666 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005667}
5668
5669
5670GCTracer::~GCTracer() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005671 // Printf ONE line iff flag is set.
Leon Clarkef7060e22010-06-03 12:02:55 +01005672 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5673
Steve Block44f0eee2011-05-26 01:26:41 +01005674 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005675
Steve Block44f0eee2011-05-26 01:26:41 +01005676 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
5677 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
Leon Clarkef7060e22010-06-03 12:02:55 +01005678
Steve Block44f0eee2011-05-26 01:26:41 +01005679 int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005680
5681 // Update cumulative GC statistics if required.
5682 if (FLAG_print_cumulative_gc_stat) {
Steve Block44f0eee2011-05-26 01:26:41 +01005683 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
5684 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
5685 heap_->alive_after_last_gc_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005686 if (!first_gc) {
Steve Block44f0eee2011-05-26 01:26:41 +01005687 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
5688 static_cast<int>(spent_in_mutator_));
Leon Clarkef7060e22010-06-03 12:02:55 +01005689 }
5690 }
5691
5692 if (!FLAG_trace_gc_nvp) {
5693 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
5694
5695 PrintF("%s %.1f -> %.1f MB, ",
5696 CollectorString(),
5697 static_cast<double>(start_size_) / MB,
5698 SizeOfHeapObjects());
5699
5700 if (external_time > 0) PrintF("%d / ", external_time);
5701 PrintF("%d ms.\n", time);
5702 } else {
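    // With --trace-gc-nvp each GC emits a single line of space-separated
    // key=value pairs; the values below are illustrative only:
    //   pause=12 mutator=340 gc=ms external=0 mark=8 sweep=2 sweepns=0
    //   compact=0 ... allocated=1048576 promoted=65536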
5703 PrintF("pause=%d ", time);
5704 PrintF("mutator=%d ",
5705 static_cast<int>(spent_in_mutator_));
5706
5707 PrintF("gc=");
5708 switch (collector_) {
5709 case SCAVENGER:
5710 PrintF("s");
5711 break;
5712 case MARK_COMPACTOR:
Steve Block44f0eee2011-05-26 01:26:41 +01005713 PrintF("%s",
5714 heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
Leon Clarkef7060e22010-06-03 12:02:55 +01005715 break;
5716 default:
5717 UNREACHABLE();
5718 }
5719 PrintF(" ");
5720
5721 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
5722 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
5723 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
Iain Merrick75681382010-08-19 15:07:18 +01005724 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
Leon Clarkef7060e22010-06-03 12:02:55 +01005725 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
5726
Ben Murdochf87a2032010-10-22 12:50:53 +01005727 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
Steve Block44f0eee2011-05-26 01:26:41 +01005728 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
Ben Murdochf87a2032010-10-22 12:50:53 +01005729 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
5730 in_free_list_or_wasted_before_gc_);
5731 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
Leon Clarkef7060e22010-06-03 12:02:55 +01005732
Ben Murdochf87a2032010-10-22 12:50:53 +01005733 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
5734 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005735
5736 PrintF("\n");
5737 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005738
5739#if defined(ENABLE_LOGGING_AND_PROFILING)
Steve Block44f0eee2011-05-26 01:26:41 +01005740 heap_->PrintShortHeapStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00005741#endif
5742}
5743
5744
5745const char* GCTracer::CollectorString() {
5746 switch (collector_) {
5747 case SCAVENGER:
5748 return "Scavenge";
5749 case MARK_COMPACTOR:
Steve Block44f0eee2011-05-26 01:26:41 +01005750 return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
5751 : "Mark-sweep";
Steve Blocka7e24c12009-10-30 11:49:00 +00005752 }
5753 return "Unknown GC";
5754}
5755
5756
5757int KeyedLookupCache::Hash(Map* map, String* name) {
5758 // Uses only lower 32 bits if pointers are larger.
5759 uintptr_t addr_hash =
Leon Clarkee46be812010-01-19 14:06:41 +00005760 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
Andrei Popescu402d9372010-02-26 13:31:12 +00005761 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
Steve Blocka7e24c12009-10-30 11:49:00 +00005762}
5763
5764
5765int KeyedLookupCache::Lookup(Map* map, String* name) {
5766 int index = Hash(map, name);
5767 Key& key = keys_[index];
5768 if ((key.map == map) && key.name->Equals(name)) {
5769 return field_offsets_[index];
5770 }
Steve Block44f0eee2011-05-26 01:26:41 +01005771 return kNotFound;
Steve Blocka7e24c12009-10-30 11:49:00 +00005772}
5773
5774
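// Only names that already exist in the symbol table are cached, and the
// canonical symbol (not the string passed in) is stored as the key.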
5775void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
5776 String* symbol;
Steve Block44f0eee2011-05-26 01:26:41 +01005777 if (HEAP->LookupSymbolIfExists(name, &symbol)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005778 int index = Hash(map, symbol);
5779 Key& key = keys_[index];
5780 key.map = map;
5781 key.name = symbol;
5782 field_offsets_[index] = field_offset;
5783 }
5784}
5785
5786
5787void KeyedLookupCache::Clear() {
5788 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5789}
5790
5791
Steve Blocka7e24c12009-10-30 11:49:00 +00005792void DescriptorLookupCache::Clear() {
5793 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
5794}
5795
5796
Steve Blocka7e24c12009-10-30 11:49:00 +00005797#ifdef DEBUG
Ben Murdochf87a2032010-10-22 12:50:53 +01005798void Heap::GarbageCollectionGreedyCheck() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005799 ASSERT(FLAG_gc_greedy);
Steve Block44f0eee2011-05-26 01:26:41 +01005800 if (isolate_->bootstrapper()->IsActive()) return;
Ben Murdochf87a2032010-10-22 12:50:53 +01005801 if (disallow_allocation_failure()) return;
5802 CollectGarbage(NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005803}
5804#endif
5805
5806
Steve Block44f0eee2011-05-26 01:26:41 +01005807TranscendentalCache::SubCache::SubCache(Type t)
5808 : type_(t),
5809 isolate_(Isolate::Current()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005810 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
5811 uint32_t in1 = 0xffffffffu; // generated by the FPU.
5812 for (int i = 0; i < kCacheSize; i++) {
5813 elements_[i].in[0] = in0;
5814 elements_[i].in[1] = in1;
5815 elements_[i].output = NULL;
5816 }
5817}
5818
5819
Steve Blocka7e24c12009-10-30 11:49:00 +00005820void TranscendentalCache::Clear() {
5821 for (int i = 0; i < kNumberOfCaches; i++) {
5822 if (caches_[i] != NULL) {
5823 delete caches_[i];
5824 caches_[i] = NULL;
5825 }
5826 }
5827}
5828
5829
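// Compacts both lists in place: entries that have been cleared to the null
// value are dropped, and new-space strings that have since been promoted are
// moved to the old-space list.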
Leon Clarkee46be812010-01-19 14:06:41 +00005830void ExternalStringTable::CleanUp() {
5831 int last = 0;
5832 for (int i = 0; i < new_space_strings_.length(); ++i) {
Steve Block44f0eee2011-05-26 01:26:41 +01005833 if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5834 if (heap_->InNewSpace(new_space_strings_[i])) {
Leon Clarkee46be812010-01-19 14:06:41 +00005835 new_space_strings_[last++] = new_space_strings_[i];
5836 } else {
5837 old_space_strings_.Add(new_space_strings_[i]);
5838 }
5839 }
5840 new_space_strings_.Rewind(last);
5841 last = 0;
5842 for (int i = 0; i < old_space_strings_.length(); ++i) {
Steve Block44f0eee2011-05-26 01:26:41 +01005843 if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5844 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
Leon Clarkee46be812010-01-19 14:06:41 +00005845 old_space_strings_[last++] = old_space_strings_[i];
5846 }
5847 old_space_strings_.Rewind(last);
5848 Verify();
5849}
5850
5851
5852void ExternalStringTable::TearDown() {
5853 new_space_strings_.Free();
5854 old_space_strings_.Free();
5855}
5856
5857
Steve Blocka7e24c12009-10-30 11:49:00 +00005858} } // namespace v8::internal