// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


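// Floors for the old-generation growth limits. After each mark-compact
// collection the actual limits are recomputed from the promoted-space
// size (see Heap::PerformGarbageCollection below).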
static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;


static Mutex* gc_initializer_mutex = OS::CreateMutex();


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
      reserved_semispace_size_(2*MB),
      max_semispace_size_(2*MB),
      initial_semispace_size_(128*KB),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
      reserved_semispace_size_(16*MB),
      max_semispace_size_(16*MB),
      initial_semispace_size_(1*MB),
      max_old_generation_size_(1*GB),
      max_executable_size_(256*MB),
      code_range_size_(512*MB),
#else
      reserved_semispace_size_(8*MB),
      max_semispace_size_(8*MB),
      initial_semispace_size_(512*KB),
      max_old_generation_size_(512*MB),
      max_executable_size_(128*MB),
      code_range_size_(0),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      mc_count_(0),
      ms_count_(0),
      gc_count_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      configured_(false),
      is_safe_to_read_maps_(true) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


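// Variant of GcSafeSizeOfOldObject used while map pointers are encoded
// during a compacting collection: a free cell marked kSingleFreeEncoding
// occupies kIntSize bytes, a free region marked kMultiFreeEncoding stores
// its byte length right after the marker, and any other object has its
// encoded map pointer decoded against the map space before the size is
// read from the map.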
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers,       used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space,     used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif

  LiveObjectList::GCPrologue();
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  mark_compact_collector_.SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector()->SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC. Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  mark_compact_collector()->SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  return next_gc_likely_to_collect_more;
}


524void Heap::PerformScavenge() {
Steve Block44f0eee2011-05-26 01:26:41 +0100525 GCTracer tracer(this);
John Reck59135872010-11-02 12:39:01 -0700526 PerformGarbageCollection(SCAVENGER, &tracer);
Steve Blocka7e24c12009-10-30 11:49:00 +0000527}
528
529
#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation. This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

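// Recomputes the young-generation survival rate as a percentage of the
// pre-GC new space size and classifies the trend (INCREASING, STABLE or
// DECREASING) by comparing it to the previous rate with a tolerance of
// kYoungSurvivalRateAllowedDeviation. Consecutive collections with a high
// rate are tracked in high_survival_rate_period_length_.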
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

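// Runs the collection selected by the caller, bracketed by the registered
// GC prologue/epilogue callbacks. After a mark-compact the old-generation
// limits are recomputed from the promoted-space size. The return value
// comes from global handle post-processing and indicates whether another
// full collection is likely to free more memory.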
bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  bool is_compacting = mark_compact_collector_.IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  is_safe_to_read_maps_ = false;
  mark_compact_collector_.CollectGarbage();
  is_safe_to_read_maps_ = true;

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects.
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(HEAP->old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}

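// The scavenger implements a copying collection of new space: live objects
// are evacuated within the semispaces or promoted into old spaces, starting
// from the roots, dirty old-space regions, cells, and the global contexts
// list.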
void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue_.Initialize(new_space_.ToSpaceHigh());

  is_safe_to_read_maps_ = false;
  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &Heap::IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();
  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  is_safe_to_read_maps_ = true;

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


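// Entry updater for the external string table used during a scavenge: a
// string that survived carries a forwarding address in its map word; one
// that does not is unreachable, so its external resource is finalized.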
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.Verify();

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


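// Walks the weak list of optimized JSFunctions threaded through the
// next_function_link field, keeps only the functions accepted by the
// retainer, relinks the survivors, and returns the new list head.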
static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = heap->undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != heap->undefined_value()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == heap->undefined_value()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(heap->undefined_value());
  }

  return head;
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (candidate != undefined_value()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined_value()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


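// Drains the two scavenger work queues until both are empty: the newly
// copied objects between new_space_front and the allocation top, and the
// promotion queue, whose entries may still hold pointers into from space.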
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue_.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue_.remove(&target, &size);

      // The promoted object might already have been partially visited
      // during dirty-region iteration.  Thus we search specifically for
      // pointers to the from-semispace instead of looking for pointers
      // to new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}


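// ScavengingVisitor selects an evacuation routine by the visitor id stored
// in an object's map, through a statically initialized dispatch table.
// This avoids a per-object switch over instance types in the hot scavenge
// loop.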
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitGlobalContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<Context::kSize>);

    typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSFunction,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<JSFunction::kSize>);

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }


  static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
    table_.GetVisitor(map)(map, slot, obj);
  }


 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
    should_record = should_record || FLAG_log_gc;
#endif
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the
  // source object.  Returns the target object.
  INLINE(static HeapObject* MigrateObject(Heap* heap,
                                          HeapObject* source,
                                          HeapObject* target,
                                          int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
    // Update NewSpace stats if necessary.
    RecordCopiedObject(heap, target);
#endif
    HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
#if defined(ENABLE_LOGGING_AND_PROFILING)
    Isolate* isolate = heap->isolate();
    if (isolate->logger()->is_logging() ||
        isolate->cpu_profiler()->is_profiling()) {
      if (target->IsSharedFunctionInfo()) {
        PROFILE(isolate, SharedFunctionInfoMoveEvent(
            source->address(), target->address()));
      }
    }
#endif
    return target;
  }


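  // EvacuateObject is the generic evacuation path.  If ShouldBePromoted
  // approves, the object is allocated in old data, old pointer, or large
  // object space, and pointer objects are additionally pushed on the
  // promotion queue so their fields get rescanned.  If promotion is not
  // wanted, or the old-space allocation fails, the object is instead
  // copied within new space.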
  template<ObjectContents object_contents, SizeRestriction size_restriction>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    ASSERT((size_restriction != SMALL) ||
           (object_size <= Page::kMaxHeapObjectSize));
    ASSERT(object->Size() == object_size);

    Heap* heap = map->heap();
    if (heap->ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if ((size_restriction != SMALL) &&
          (object_size > Page::kMaxHeapObjectSize)) {
        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
      } else {
        if (object_contents == DATA_OBJECT) {
          maybe_result = heap->old_data_space()->AllocateRaw(object_size);
        } else {
          maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
        }
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);
        *slot = MigrateObject(heap, object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          heap->promotion_queue()->insert(target, object_size);
        }

        heap->tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    Object* result =
        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
    *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
    return;
  }


  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
                                                 slot,
                                                 object,
                                                 object_size);
  }


  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqAsciiString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
    int object_size = SeqAsciiString::cast(object)->
        SeqAsciiStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }

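  // A cons string whose second part is the empty string is "shortcut"
  // during scavenging: the slot is redirected to the first part, so such
  // wrappers disappear during GC instead of being copied.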
  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    ASSERT(IsShortcutCandidate(map->instance_type()));

    if (ConsString::cast(object)->unchecked_second() ==
        map->heap()->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!map->heap()->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      Scavenge(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
  }

  template<ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template<int object_size>
    static inline void VisitSpecialized(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }

    static inline void Visit(Map* map,
                             HeapObject** slot,
                             HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }
  };

  typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;
};


VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;


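// Slow path of Heap::ScavengeObject, reached when the object has not been
// forwarded yet; it dispatches to the evacuation routine registered for
// the object's map.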
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  ASSERT(HEAP->InFromSpace(object));
  MapWord first_word = object->map_word();
  ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  ScavengingVisitor::Scavenge(map, p, object);
}


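// AllocatePartialMap bootstraps maps that are needed before the maps of
// their own field values exist.  Fields such as instance_descriptors,
// code_cache, and prototype are left untouched here and patched up later
// in CreateInitialMaps.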
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  return result;
}


MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value());
  map->set_constructor(null_value());
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->set_instance_descriptors(empty_descriptor_array());
  map->set_code_cache(empty_fixed_array());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));

  // If the map object is aligned fill the padding area with Smi 0 objects.
  if (Map::kPadStart < Map::kSize) {
    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
           0,
           Map::kSize - Map::kPadStart);
  }
  return map;
}


MaybeObject* Heap::AllocateCodeCache() {
  Object* result;
  { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  CodeCache* code_cache = CodeCache::cast(result);
  code_cache->set_default_cache(empty_fixed_array());
  code_cache->set_normal_type_cache(undefined_value());
  return code_cache;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
  {contents, k##name##RootIndex},
  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};


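// CreateInitialMaps sets up the self-referential meta map (the map of all
// maps, including itself) and the handful of maps and arrays everything
// else depends on, then fixes up the fields that could not be initialized
// on the partial maps.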
bool Heap::CreateInitialMaps() {
  Object* obj;
  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  { MaybeObject* maybe_obj =
        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_null_value(obj);
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  // Allocate the empty descriptor array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_instance_descriptors(empty_descriptor_array());
  meta_map()->set_code_cache(empty_fixed_array());

  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
  fixed_array_map()->set_code_cache(empty_fixed_array());

  oddball_map()->set_instance_descriptors(empty_descriptor_array());
  oddball_map()->set_code_cache(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_cow_array_map(Map::cast(obj));
  ASSERT(fixed_array_map() != fixed_cow_array_map());

  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_heap_number_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_proxy_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_byte_array(ByteArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_pixel_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_float_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
                                         JSGlobalPropertyCell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_property_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_one_pointer_filler_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_two_pointer_filler_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_hash_table_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_catch_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Map* global_context_map = Map::cast(obj);
  global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
  set_global_context_map(global_context_map);

  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                                         SharedFunctionInfo::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_shared_function_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
                                         JSMessageObject::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_message_object_map(Map::cast(obj));

  ASSERT(!InNewSpace(empty_fixed_array()));
  return true;
}


MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateHeapNumber(double value) {
  // Use general version, if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, TENURED);

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result;
  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawCell();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(global_property_cell_map());
  JSGlobalPropertyCell::cast(result)->set_value(value);
  return result;
}


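// Oddballs (undefined, null, true, false, the hole, and a few sentinels)
// are singleton objects with both a string and a number representation;
// the kind byte tells them apart.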
MaybeObject* Heap::CreateOddball(const char* to_string,
                                 Object* to_number,
                                 byte kind) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_neander_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Object* elements;
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
    if (!maybe_elements->ToObject(&elements)) return false;
  }
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}


void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode());
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode());
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs.  They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope;
  // gcc-4.4 has problems generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();
}


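// CreateInitialObjects is order sensitive: -0 must exist before
// NumberFromDouble is usable, and the symbol table must exist before the
// oddballs can be given their print strings.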
bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(obj);
  ASSERT(signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_nan_value(obj);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(obj);
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate the initial symbol table.
  { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use set_symbol_table() due to asserts.
  roots_[kSymbolTableRootIndex] = obj;

  // Assign the print strings for oddballs after creating the symbol table.
  Object* symbol;
  { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
    if (!maybe_symbol->ToObject(&symbol)) return false;
  }
  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value())->set_to_number(nan_value());

  // Allocate the null_value.
  { MaybeObject* maybe_obj =
        Oddball::cast(null_value())->Initialize("null",
                                                Smi::FromInt(0),
                                                Oddball::kNull);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  { MaybeObject* maybe_obj = CreateOddball("true",
                                           Smi::FromInt(1),
                                           Oddball::kTrue);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_true_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("false",
                                           Smi::FromInt(0),
                                           Oddball::kFalse);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_false_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("hole",
                                           Smi::FromInt(-1),
                                           Oddball::kTheHole);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_the_hole_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                           Smi::FromInt(-4),
                                           Oddball::kArgumentMarker);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_arguments_marker(obj);

  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                           Smi::FromInt(-2),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_no_interceptor_result_sentinel(obj);

  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
                                           Smi::FromInt(-3),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_termination_exception(obj);

  // Allocate the empty string.
  { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_string(String::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
    { MaybeObject* maybe_obj =
          LookupAsciiSymbol(constant_symbol_table[i].contents);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[constant_symbol_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  { MaybeObject* maybe_obj =
        AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  hidden_symbol_ = String::cast(obj);

  // Allocate the proxy for __proto__.
  { MaybeObject* maybe_obj =
        AllocateProxy((Address) &Accessors::ObjectPrototype);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_prototype_accessors(Proxy::cast(obj));

  // Allocate the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_stubs(NumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_monomorphic_cache(NumberDictionary::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
                                                                       obj);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_intrinsic_function_names(StringDictionary::cast(obj));

  if (InitializeNumberStringCache()->IsFailure()) return false;

  // Allocate the cache for single-character ASCII strings.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate the cache for external strings pointing to native source code.
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_natives_source_cache(FixedArray::cast(obj));

  // Handling of script id generation is in FACTORY->NewScript.
  set_last_script_id(undefined_value());

  // Initialize keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize compilation cache.
  isolate_->compilation_cache()->Clear();

  return true;
}


MaybeObject* Heap::InitializeNumberStringCache() {
  // Compute the size of the number string cache based on the max heap size.
  // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
  // max_semispace_size_ ==   8 MB => number_string_cache_size = 16KB.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
  Object* obj;
  MaybeObject* maybe_obj =
      AllocateFixedArray(number_string_cache_size * 2, TENURED);
  if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
  return maybe_obj;
}


void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(this, i);
  }
}


static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}


static inline int smi_get_hash(Smi* smi) {
  return smi->value();
}


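// The number-string cache is a flat FixedArray of (number, string) pairs:
// entry i occupies elements 2 * i and 2 * i + 1.  The pair count is
// expected to be a power of two (the sizing in InitializeNumberStringCache
// relies on it), so masking the hash with (length / 2) - 1 selects an entry.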
Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  Object* key = number_string_cache()->get(hash * 2);
  if (key == number) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  }
  return undefined_value();
}


void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
    number_string_cache()->set(hash * 2, Smi::cast(number));
  } else {
    hash = double_get_hash(number->Number()) & mask;
    number_string_cache()->set(hash * 2, number);
  }
  number_string_cache()->set(hash * 2 + 1, string);
}


MaybeObject* Heap::NumberToString(Object* number,
                                  bool check_number_string_cache) {
  isolate_->counters()->number_to_string_runtime()->Increment();
  if (check_number_string_cache) {
    Object* cached = GetNumberStringCache(number);
    if (cached != undefined_value()) {
      return cached;
    }
  }

  char arr[100];
  Vector<char> buffer(arr, ARRAY_SIZE(arr));
  const char* str;
  if (number->IsSmi()) {
    int num = Smi::cast(number)->value();
    str = IntToCString(num, buffer);
  } else {
    double num = HeapNumber::cast(number)->value();
    str = DoubleToCString(num, buffer);
  }

  Object* js_string;
  MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
  if (maybe_js_string->ToObject(&js_string)) {
    SetNumberStringCache(number, String::cast(js_string));
  }
  return maybe_js_string;
}


Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}


Heap::RootListIndex Heap::RootIndexForExternalArrayType(
    ExternalArrayType array_type) {
  switch (array_type) {
    case kExternalByteArray:
      return kExternalByteArrayMapRootIndex;
    case kExternalUnsignedByteArray:
      return kExternalUnsignedByteArrayMapRootIndex;
    case kExternalShortArray:
      return kExternalShortArrayMapRootIndex;
    case kExternalUnsignedShortArray:
      return kExternalUnsignedShortArrayMapRootIndex;
    case kExternalIntArray:
      return kExternalIntArrayMapRootIndex;
    case kExternalUnsignedIntArray:
      return kExternalUnsignedIntArrayMapRootIndex;
    case kExternalFloatArray:
      return kExternalFloatArrayMapRootIndex;
    case kExternalPixelArray:
      return kExternalPixelArrayMapRootIndex;
    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}


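// For example, NumberFromDouble(2.0) yields Smi::FromInt(2), while
// NumberFromDouble(-0.0) must allocate a HeapNumber: a Smi zero cannot
// carry the sign bit, and -0 and +0 compare equal as doubles, which is
// why the bit patterns are compared below.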
MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int.  Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
  static const DoubleRepresentation minus_zero(-0.0);

  DoubleRepresentation rep(value);
  if (rep.bits == minus_zero.bits) {
    return AllocateHeapNumber(-0.0, pretenure);
  }

  int int_value = FastD2I(value);
  if (value == int_value && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}


MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate proxies in paged spaces.
  STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(proxy_map(), space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Proxy::cast(result)->set_proxy(proxy);
  return result;
}


MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
  Object* result;
  { MaybeObject* maybe_result =
        Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
  share->set_name(name);
  Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
  share->set_code(illegal);
  share->set_scope_info(SerializedScopeInfo::Empty());
  Code* construct_stub = isolate_->builtins()->builtin(
      Builtins::kJSConstructStubGeneric);
  share->set_construct_stub(construct_stub);
  share->set_expected_nof_properties(0);
  share->set_length(0);
  share->set_formal_parameter_count(0);
  share->set_instance_class_name(Object_symbol());
  share->set_function_data(undefined_value());
  share->set_script(undefined_value());
  share->set_start_position_and_type(0);
  share->set_debug_info(undefined_value());
  share->set_inferred_name(empty_string());
  share->set_compiler_hints(0);
  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
  share->set_initial_map(undefined_value());
  share->set_this_property_assignments_count(0);
  share->set_this_property_assignments(undefined_value());
  share->set_opt_count(0);
  share->set_num_literals(0);
  share->set_end_position(0);
  share->set_function_token_position(0);
  return result;
}


MaybeObject* Heap::AllocateJSMessageObject(String* type,
                                           JSArray* arguments,
                                           int start_position,
                                           int end_position,
                                           Object* script,
                                           Object* stack_trace,
                                           Object* stack_frames) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  JSMessageObject* message = JSMessageObject::cast(result);
  message->set_properties(Heap::empty_fixed_array());
  message->set_elements(Heap::empty_fixed_array());
  message->set_type(type);
  message->set_arguments(arguments);
  message->set_start_position(start_position);
  message->set_end_position(end_position);
  message->set_script(script);
  message->set_stack_trace(stack_trace);
  message->set_stack_frames(stack_frames);
  return result;
}


// Returns true for a character in a range.  Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  // This makes use of the unsigned wraparound.
  return character - from <= to - from;
}


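// Concatenations that produce a two-character string are common enough
// (for example, when strings are built up character by character) that it
// pays to probe the symbol table before allocating a fresh string.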
MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
    Heap* heap,
    uint32_t c1,
    uint32_t c2) {
  String* symbol;
  // Numeric strings have a different hash algorithm not known by
  // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
      heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
    return symbol;
  // Now we know the length is 2, we might as well make use of that fact
  // when building the new string.
  } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) {  // We can do this
    ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1));  // because of this.
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    char* dest = SeqAsciiString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  } else {
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    uc16* dest = SeqTwoByteString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  }
}


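// AllocateConsString flattens eagerly when the result is shorter than
// String::kMinNonFlatLength, copying both parts into a fresh sequential
// string; only longer results get an actual ConsString node.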
John Reck59135872010-11-02 12:39:01 -07002374MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002375 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002376 if (first_length == 0) {
2377 return second;
2378 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002379
2380 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002381 if (second_length == 0) {
2382 return first;
2383 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002384
2385 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002386
2387 // Optimization for 2-byte strings often used as keys in a decompression
2388 // dictionary. Check whether we already have the string in the symbol
2389 // table to prevent creation of many unneccesary strings.
2390 if (length == 2) {
2391 unsigned c1 = first->Get(0);
2392 unsigned c2 = second->Get(0);
Steve Block44f0eee2011-05-26 01:26:41 +01002393 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blockd0582a62009-12-15 09:54:21 +00002394 }
2395
Steve Block6ded16b2010-05-10 14:33:55 +01002396 bool first_is_ascii = first->IsAsciiRepresentation();
2397 bool second_is_ascii = second->IsAsciiRepresentation();
2398 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002399
2400 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002401 // of the new cons string is too large.
2402 if (length > String::kMaxLength || length < 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01002403 isolate()->context()->mark_out_of_memory();
Steve Blocka7e24c12009-10-30 11:49:00 +00002404 return Failure::OutOfMemoryException();
2405 }
2406
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002407 bool is_ascii_data_in_two_byte_string = false;
2408 if (!is_ascii) {
2409    // At least one of the strings uses a two-byte representation, so we
2410    // can't use the fast-case code for short ASCII strings below, but we
2411    // can try to save memory if all the characters actually fit in ASCII.
2412 is_ascii_data_in_two_byte_string =
2413 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2414 if (is_ascii_data_in_two_byte_string) {
Steve Block44f0eee2011-05-26 01:26:41 +01002415 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002416 }
2417 }
2418
Steve Blocka7e24c12009-10-30 11:49:00 +00002419  // If the resulting string is small, make a flat string.
2420 if (length < String::kMinNonFlatLength) {
2421 ASSERT(first->IsFlat());
2422 ASSERT(second->IsFlat());
2423 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002424 Object* result;
2425 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2426 if (!maybe_result->ToObject(&result)) return maybe_result;
2427 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002428 // Copy the characters into the new object.
2429 char* dest = SeqAsciiString::cast(result)->GetChars();
2430 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002431 const char* src;
2432 if (first->IsExternalString()) {
2433 src = ExternalAsciiString::cast(first)->resource()->data();
2434 } else {
2435 src = SeqAsciiString::cast(first)->GetChars();
2436 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002437 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2438 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002439 if (second->IsExternalString()) {
2440 src = ExternalAsciiString::cast(second)->resource()->data();
2441 } else {
2442 src = SeqAsciiString::cast(second)->GetChars();
2443 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002444 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2445 return result;
2446 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002447 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002448 Object* result;
2449 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2450 if (!maybe_result->ToObject(&result)) return maybe_result;
2451 }
Steve Block6ded16b2010-05-10 14:33:55 +01002452 // Copy the characters into the new object.
2453 char* dest = SeqAsciiString::cast(result)->GetChars();
2454 String::WriteToFlat(first, dest, 0, first_length);
2455 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block44f0eee2011-05-26 01:26:41 +01002456 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002457 return result;
2458 }
2459
John Reck59135872010-11-02 12:39:01 -07002460 Object* result;
2461 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2462 if (!maybe_result->ToObject(&result)) return maybe_result;
2463 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002464 // Copy the characters into the new object.
2465 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2466 String::WriteToFlat(first, dest, 0, first_length);
2467 String::WriteToFlat(second, dest + first_length, 0, second_length);
2468 return result;
2469 }
2470 }
2471
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002472 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2473 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002474
John Reck59135872010-11-02 12:39:01 -07002475 Object* result;
2476 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2477 if (!maybe_result->ToObject(&result)) return maybe_result;
2478 }
Leon Clarke4515c472010-02-03 11:58:03 +00002479
2480 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002481 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002482 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002483 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002484 cons_string->set_hash_field(String::kEmptyHashField);
2485 cons_string->set_first(first, mode);
2486 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002487 return result;
2488}
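
// Note on the fast paths above: an empty operand returns the other string
// unchanged, a length-2 result funnels through MakeOrFindTwoCharacterString,
// results shorter than String::kMinNonFlatLength are copied into a fresh
// flat string, and only longer results become real ConsStrings that share
// their operands.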
2489
2490
John Reck59135872010-11-02 12:39:01 -07002491MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002492 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002493 int end,
2494 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002495 int length = end - start;
2496
2497 if (length == 1) {
Steve Block44f0eee2011-05-26 01:26:41 +01002498 return LookupSingleCharacterStringFromCode(buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002499 } else if (length == 2) {
2500 // Optimization for 2-byte strings often used as keys in a decompression
2501 // dictionary. Check whether we already have the string in the symbol
2502  // table to prevent creation of many unnecessary strings.
2503 unsigned c1 = buffer->Get(start);
2504 unsigned c2 = buffer->Get(start + 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002505 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002506 }
2507
2508 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002509 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002510
John Reck59135872010-11-02 12:39:01 -07002511 Object* result;
2512 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2513        ? AllocateRawAsciiString(length, pretenure)
2514 : AllocateRawTwoByteString(length, pretenure);
2515 if (!maybe_result->ToObject(&result)) return maybe_result;
2516 }
Steve Blockd0582a62009-12-15 09:54:21 +00002517 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002518 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002519 if (buffer->IsAsciiRepresentation()) {
2520 ASSERT(string_result->IsAsciiRepresentation());
2521 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2522 String::WriteToFlat(buffer, dest, start, end);
2523 } else {
2524 ASSERT(string_result->IsTwoByteRepresentation());
2525 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2526 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002527 }
Steve Blockd0582a62009-12-15 09:54:21 +00002528
Steve Blocka7e24c12009-10-30 11:49:00 +00002529 return result;
2530}
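
// Illustrative usage sketch, assuming an initialized Heap* and a string of
// length >= 3; the helper name is hypothetical. Substrings of length 1 or 2
// are served from the caches checked above rather than freshly allocated.
MUST_USE_RESULT static MaybeObject* SubStringExample(Heap* heap,
                                                     String* buffer) {
  ASSERT(buffer->length() >= 3);
  return heap->AllocateSubString(buffer, 1, 3, NOT_TENURED);
}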
2531
2532
John Reck59135872010-11-02 12:39:01 -07002533MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002534 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002535 size_t length = resource->length();
2536 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002537 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002538 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002539 }
2540
Steve Blockd0582a62009-12-15 09:54:21 +00002541 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002542 Object* result;
2543 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2544 if (!maybe_result->ToObject(&result)) return maybe_result;
2545 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002546
2547 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002548 external_string->set_length(static_cast<int>(length));
2549 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002550 external_string->set_resource(resource);
2551
2552 return result;
2553}
2554
2555
John Reck59135872010-11-02 12:39:01 -07002556MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002557 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002558 size_t length = resource->length();
2559 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002560 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002561 return Failure::OutOfMemoryException();
2562 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002563
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002564 // For small strings we check whether the resource contains only
Steve Block9fac8402011-05-12 15:51:54 +01002565  // ASCII characters. If so, we use a different string map.
2566 static const size_t kAsciiCheckLengthLimit = 32;
2567 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2568 String::IsAscii(resource->data(), static_cast<int>(length));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002569 Map* map = is_ascii ?
Steve Block44f0eee2011-05-26 01:26:41 +01002570 external_string_with_ascii_data_map() : external_string_map();
John Reck59135872010-11-02 12:39:01 -07002571 Object* result;
2572 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2573 if (!maybe_result->ToObject(&result)) return maybe_result;
2574 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002575
2576 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002577 external_string->set_length(static_cast<int>(length));
2578 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002579 external_string->set_resource(resource);
2580
2581 return result;
2582}
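
// A sketch of a Resource implementation for the allocators above, assuming
// the wrapped buffer is caller-owned and outlives the string (the heap
// object stores only the pointer). Hypothetical class, for exposition only.
class StaticAsciiResource : public ExternalAsciiString::Resource {
 public:
  StaticAsciiResource(const char* data, size_t length)
      : data_(data), length_(length) {}
  virtual const char* data() const { return data_; }
  virtual size_t length() const { return length_; }
 private:
  const char* data_;
  size_t length_;
};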
2583
2584
John Reck59135872010-11-02 12:39:01 -07002585MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002586 if (code <= String::kMaxAsciiCharCode) {
Steve Block44f0eee2011-05-26 01:26:41 +01002587 Object* value = single_character_string_cache()->get(code);
2588 if (value != undefined_value()) return value;
Steve Blocka7e24c12009-10-30 11:49:00 +00002589
2590 char buffer[1];
2591 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002592 Object* result;
2593 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002594
John Reck59135872010-11-02 12:39:01 -07002595 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002596 single_character_string_cache()->set(code, result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002597 return result;
2598 }
2599
John Reck59135872010-11-02 12:39:01 -07002600 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002601 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
John Reck59135872010-11-02 12:39:01 -07002602 if (!maybe_result->ToObject(&result)) return maybe_result;
2603 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002604 String* answer = String::cast(result);
2605 answer->Set(0, code);
2606 return answer;
2607}
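
// Note on the two paths above: codes up to String::kMaxAsciiCharCode are
// served from single_character_string_cache(), so repeated lookups of the
// same code return the same symbol object, while larger code units allocate
// a fresh two-byte string on every call.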
2608
2609
John Reck59135872010-11-02 12:39:01 -07002610MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002611 if (length < 0 || length > ByteArray::kMaxLength) {
2612 return Failure::OutOfMemoryException();
2613 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002614 if (pretenure == NOT_TENURED) {
2615 return AllocateByteArray(length);
2616 }
2617 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002618 Object* result;
2619 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2620 ? old_data_space_->AllocateRaw(size)
2621 : lo_space_->AllocateRaw(size);
2622 if (!maybe_result->ToObject(&result)) return maybe_result;
2623 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002624
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002625 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2626 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002627 return result;
2628}
2629
2630
John Reck59135872010-11-02 12:39:01 -07002631MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002632 if (length < 0 || length > ByteArray::kMaxLength) {
2633 return Failure::OutOfMemoryException();
2634 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002635 int size = ByteArray::SizeFor(length);
2636 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002637 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002638 Object* result;
2639 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2640 if (!maybe_result->ToObject(&result)) return maybe_result;
2641 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002642
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002643 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2644 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002645 return result;
2646}
2647
2648
2649void Heap::CreateFillerObjectAt(Address addr, int size) {
2650 if (size == 0) return;
2651 HeapObject* filler = HeapObject::FromAddress(addr);
2652 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002653 filler->set_map(one_pointer_filler_map());
2654 } else if (size == 2 * kPointerSize) {
2655 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002656 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002657 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002658 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2659 }
2660}
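
// Illustrative example of the three filler cases above, assuming 32-bit
// pointers (kPointerSize == 4): a 4-byte hole gets one_pointer_filler_map,
// an 8-byte hole gets two_pointer_filler_map, and any larger hole becomes a
// ByteArray whose length is chosen so that ByteArray::SizeFor(length) equals
// the hole size, letting heap iterators step over it.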
2661
2662
John Reck59135872010-11-02 12:39:01 -07002663MaybeObject* Heap::AllocateExternalArray(int length,
2664 ExternalArrayType array_type,
2665 void* external_pointer,
2666 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002667 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002668 Object* result;
2669 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2670 space,
2671 OLD_DATA_SPACE);
2672 if (!maybe_result->ToObject(&result)) return maybe_result;
2673 }
Steve Block3ce2e202009-11-05 08:53:23 +00002674
2675 reinterpret_cast<ExternalArray*>(result)->set_map(
2676 MapForExternalArrayType(array_type));
2677 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2678 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2679 external_pointer);
2680
2681 return result;
2682}
2683
2684
John Reck59135872010-11-02 12:39:01 -07002685MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2686 Code::Flags flags,
Steve Block44f0eee2011-05-26 01:26:41 +01002687 Handle<Object> self_reference,
2688 bool immovable) {
Leon Clarkeac952652010-07-15 11:15:24 +01002689 // Allocate ByteArray before the Code object, so that we do not risk
2690  // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002691 Object* reloc_info;
2692 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2693 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2694 }
Leon Clarkeac952652010-07-15 11:15:24 +01002695
Steve Block44f0eee2011-05-26 01:26:41 +01002696 // Compute size.
Leon Clarkeac952652010-07-15 11:15:24 +01002697 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002698 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002699 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002700 MaybeObject* maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002701 // Large code objects and code objects which should stay at a fixed address
2702 // are allocated in large object space.
2703 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
John Reck59135872010-11-02 12:39:01 -07002704 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002705 } else {
John Reck59135872010-11-02 12:39:01 -07002706 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002707 }
2708
John Reck59135872010-11-02 12:39:01 -07002709 Object* result;
2710 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002711
2712 // Initialize the object
2713 HeapObject::cast(result)->set_map(code_map());
2714 Code* code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002715 ASSERT(!isolate_->code_range()->exists() ||
2716 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002717 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002718 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002719 code->set_flags(flags);
Ben Murdochb8e0da22011-05-16 14:20:40 +01002720 if (code->is_call_stub() || code->is_keyed_call_stub()) {
2721 code->set_check_type(RECEIVER_MAP_CHECK);
2722 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002723 code->set_deoptimization_data(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002724  // Allow self-references to the created code object by patching the handle
2725  // to point to the newly allocated Code object.
2726 if (!self_reference.is_null()) {
2727 *(self_reference.location()) = code;
2728 }
2729 // Migrate generated code.
2730 // The generated code can contain Object** values (typically from handles)
2731 // that are dereferenced during the copy to point directly to the actual heap
2732 // objects. These pointers can include references to the code object itself,
2733 // through the self_reference parameter.
2734 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002735
2736#ifdef DEBUG
2737 code->Verify();
2738#endif
2739 return code;
2740}
2741
2742
John Reck59135872010-11-02 12:39:01 -07002743MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002744 // Allocate an object the same size as the code object.
2745 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002746 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002747 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002748 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002749 } else {
John Reck59135872010-11-02 12:39:01 -07002750 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002751 }
2752
John Reck59135872010-11-02 12:39:01 -07002753 Object* result;
2754 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002755
2756 // Copy code object.
2757 Address old_addr = code->address();
2758 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002759 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002760 // Relocate the copy.
2761 Code* new_code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002762 ASSERT(!isolate_->code_range()->exists() ||
2763 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002764 new_code->Relocate(new_addr - old_addr);
2765 return new_code;
2766}
2767
2768
John Reck59135872010-11-02 12:39:01 -07002769MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002770 // Allocate ByteArray before the Code object, so that we do not risk
2771  // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002772 Object* reloc_info_array;
2773 { MaybeObject* maybe_reloc_info_array =
2774 AllocateByteArray(reloc_info.length(), TENURED);
2775 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2776 return maybe_reloc_info_array;
2777 }
2778 }
Leon Clarkeac952652010-07-15 11:15:24 +01002779
2780 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002781
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002782 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002783
2784 Address old_addr = code->address();
2785
2786 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002787 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002788
John Reck59135872010-11-02 12:39:01 -07002789 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002790 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002791 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002792 } else {
John Reck59135872010-11-02 12:39:01 -07002793 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002794 }
2795
John Reck59135872010-11-02 12:39:01 -07002796 Object* result;
2797 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002798
2799 // Copy code object.
2800 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2801
2802 // Copy header and instructions.
2803 memcpy(new_addr, old_addr, relocation_offset);
2804
Steve Block6ded16b2010-05-10 14:33:55 +01002805 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002806 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002807
Leon Clarkeac952652010-07-15 11:15:24 +01002808 // Copy patched rinfo.
2809 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002810
2811 // Relocate the copy.
Steve Block44f0eee2011-05-26 01:26:41 +01002812 ASSERT(!isolate_->code_range()->exists() ||
2813 isolate_->code_range()->contains(code->address()));
Steve Block6ded16b2010-05-10 14:33:55 +01002814 new_code->Relocate(new_addr - old_addr);
2815
2816#ifdef DEBUG
2817  new_code->Verify();  // Verify the copy, not the source.
2818#endif
2819 return new_code;
2820}
2821
2822
John Reck59135872010-11-02 12:39:01 -07002823MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002824 ASSERT(gc_state_ == NOT_IN_GC);
2825 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002826 // If allocation failures are disallowed, we may allocate in a different
2827 // space when new space is full and the object is not a large object.
2828 AllocationSpace retry_space =
2829 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002830 Object* result;
2831 { MaybeObject* maybe_result =
2832 AllocateRaw(map->instance_size(), space, retry_space);
2833 if (!maybe_result->ToObject(&result)) return maybe_result;
2834 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002835 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002836#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01002837 isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
Steve Block3ce2e202009-11-05 08:53:23 +00002838#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002839 return result;
2840}
2841
2842
John Reck59135872010-11-02 12:39:01 -07002843MaybeObject* Heap::InitializeFunction(JSFunction* function,
2844 SharedFunctionInfo* shared,
2845 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002846 ASSERT(!prototype->IsMap());
2847 function->initialize_properties();
2848 function->initialize_elements();
2849 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002850 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002851 function->set_prototype_or_initial_map(prototype);
2852 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002853 function->set_literals(empty_fixed_array());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002854 function->set_next_function_link(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002855 return function;
2856}
2857
2858
John Reck59135872010-11-02 12:39:01 -07002859MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002860 // Allocate the prototype. Make sure to use the object function
2861 // from the function's context, since the function can be from a
2862 // different context.
2863 JSFunction* object_function =
2864 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002865 Object* prototype;
2866 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2867 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2868 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002869 // When creating the prototype for the function we must set its
2870 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002871 Object* result;
2872 { MaybeObject* maybe_result =
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002873 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
2874 constructor_symbol(), function, DONT_ENUM);
John Reck59135872010-11-02 12:39:01 -07002875 if (!maybe_result->ToObject(&result)) return maybe_result;
2876 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002877 return prototype;
2878}
2879
2880
John Reck59135872010-11-02 12:39:01 -07002881MaybeObject* Heap::AllocateFunction(Map* function_map,
2882 SharedFunctionInfo* shared,
2883 Object* prototype,
2884 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002885 AllocationSpace space =
2886 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002887 Object* result;
2888 { MaybeObject* maybe_result = Allocate(function_map, space);
2889 if (!maybe_result->ToObject(&result)) return maybe_result;
2890 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002891 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2892}
2893
2894
John Reck59135872010-11-02 12:39:01 -07002895MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002896 // To get fast allocation and map sharing for arguments objects we
2897 // allocate them based on an arguments boilerplate.
2898
Steve Block44f0eee2011-05-26 01:26:41 +01002899 JSObject* boilerplate;
2900 int arguments_object_size;
2901 bool strict_mode_callee = callee->IsJSFunction() &&
2902 JSFunction::cast(callee)->shared()->strict_mode();
2903 if (strict_mode_callee) {
2904 boilerplate =
2905 isolate()->context()->global_context()->
2906 strict_mode_arguments_boilerplate();
2907 arguments_object_size = kArgumentsObjectSizeStrict;
2908 } else {
2909 boilerplate =
2910 isolate()->context()->global_context()->arguments_boilerplate();
2911 arguments_object_size = kArgumentsObjectSize;
2912 }
2913
Steve Blocka7e24c12009-10-30 11:49:00 +00002914 // This calls Copy directly rather than using Heap::AllocateRaw so we
2915 // duplicate the check here.
2916 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2917
Leon Clarkee46be812010-01-19 14:06:41 +00002918 // Check that the size of the boilerplate matches our
2919  // expectations. ArgumentsAccessStub::GenerateNewObject relies
2920 // on the size being a known constant.
Steve Block44f0eee2011-05-26 01:26:41 +01002921 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
Leon Clarkee46be812010-01-19 14:06:41 +00002922
2923 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002924 Object* result;
2925 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01002926 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
John Reck59135872010-11-02 12:39:01 -07002927 if (!maybe_result->ToObject(&result)) return maybe_result;
2928 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002929
2930 // Copy the content. The arguments boilerplate doesn't have any
2931 // fields that point to new space so it's safe to skip the write
2932 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002933 CopyBlock(HeapObject::cast(result)->address(),
2934 boilerplate->address(),
Steve Block44f0eee2011-05-26 01:26:41 +01002935 JSObject::kHeaderSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002936
Steve Block44f0eee2011-05-26 01:26:41 +01002937 // Set the length property.
2938 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
Steve Blocka7e24c12009-10-30 11:49:00 +00002939 Smi::FromInt(length),
2940 SKIP_WRITE_BARRIER);
Steve Block44f0eee2011-05-26 01:26:41 +01002941 // Set the callee property for non-strict mode arguments object only.
2942 if (!strict_mode_callee) {
2943 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
2944 callee);
2945 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002946
2947 // Check the state of the object
2948 ASSERT(JSObject::cast(result)->HasFastProperties());
2949 ASSERT(JSObject::cast(result)->HasFastElements());
2950
2951 return result;
2952}
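
// Note on the two boilerplates above: the strict mode boilerplate has no
// callee property, which is why arguments_object_size differs and only the
// length slot is written for strict mode callees; the non-strict variant
// additionally stores the callee so that arguments.callee keeps working.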
2953
2954
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002955static bool HasDuplicates(DescriptorArray* descriptors) {
2956 int count = descriptors->number_of_descriptors();
2957 if (count > 1) {
2958 String* prev_key = descriptors->GetKey(0);
2959 for (int i = 1; i != count; i++) {
2960 String* current_key = descriptors->GetKey(i);
2961 if (prev_key == current_key) return true;
2962 prev_key = current_key;
2963 }
2964 }
2965 return false;
2966}
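
// Illustrative example for the linear scan above, which assumes the
// descriptor array is already sorted by key (see the SortUnchecked() call
// in AllocateInitialMap below): keys ["a", "b", "b"] report a duplicate
// because equal keys end up adjacent, while ["a", "b", "c"] do not.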
2967
2968
John Reck59135872010-11-02 12:39:01 -07002969MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002970 ASSERT(!fun->has_initial_map());
2971
2972 // First create a new map with the size and number of in-object properties
2973 // suggested by the function.
2974 int instance_size = fun->shared()->CalculateInstanceSize();
2975 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07002976 Object* map_obj;
Steve Block44f0eee2011-05-26 01:26:41 +01002977 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
John Reck59135872010-11-02 12:39:01 -07002978 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
2979 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002980
2981 // Fetch or allocate prototype.
2982 Object* prototype;
2983 if (fun->has_instance_prototype()) {
2984 prototype = fun->instance_prototype();
2985 } else {
John Reck59135872010-11-02 12:39:01 -07002986 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
2987 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2988 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002989 }
2990 Map* map = Map::cast(map_obj);
2991 map->set_inobject_properties(in_object_properties);
2992 map->set_unused_property_fields(in_object_properties);
2993 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01002994 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00002995
Andrei Popescu402d9372010-02-26 13:31:12 +00002996  // If the function has only simple this-property assignments, add
2997  // field descriptors for these to the initial map, as the object
2998 // cannot be constructed without having these properties. Guard by
2999 // the inline_new flag so we only change the map if we generate a
3000 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00003001 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00003002 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003003 int count = fun->shared()->this_property_assignments_count();
3004 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003005      // The inline constructor can only handle in-object properties.
3006 fun->shared()->ForbidInlineConstructor();
3007 } else {
John Reck59135872010-11-02 12:39:01 -07003008 Object* descriptors_obj;
3009 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3010 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3011 return maybe_descriptors_obj;
3012 }
3013 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003014 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3015 for (int i = 0; i < count; i++) {
3016 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3017 ASSERT(name->IsSymbol());
3018 FieldDescriptor field(name, i, NONE);
3019 field.SetEnumerationIndex(i);
3020 descriptors->Set(i, &field);
3021 }
3022 descriptors->SetNextEnumerationIndex(count);
3023 descriptors->SortUnchecked();
3024
3025 // The descriptors may contain duplicates because the compiler does not
3026 // guarantee the uniqueness of property names (it would have required
3027 // quadratic time). Once the descriptors are sorted we can check for
3028 // duplicates in linear time.
3029 if (HasDuplicates(descriptors)) {
3030 fun->shared()->ForbidInlineConstructor();
3031 } else {
3032 map->set_instance_descriptors(descriptors);
3033 map->set_pre_allocated_property_fields(count);
3034 map->set_unused_property_fields(in_object_properties - count);
3035 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003036 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003037 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003038
3039 fun->shared()->StartInobjectSlackTracking(map);
3040
Steve Blocka7e24c12009-10-30 11:49:00 +00003041 return map;
3042}
3043
3044
3045void Heap::InitializeJSObjectFromMap(JSObject* obj,
3046 FixedArray* properties,
3047 Map* map) {
3048 obj->set_properties(properties);
3049 obj->initialize_elements();
3050 // TODO(1240798): Initialize the object's body using valid initial values
3051 // according to the object's initial map. For example, if the map's
3052 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3053  // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
3054  // fixed array (e.g., Heap::empty_fixed_array()).  Currently, the object
3055  // verification code has to cope with (temporarily) invalid objects. (See,
3056  // for example, JSArray::JSArrayVerify.)
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003057 Object* filler;
3058 // We cannot always fill with one_pointer_filler_map because objects
3059 // created from API functions expect their internal fields to be initialized
3060 // with undefined_value.
3061 if (map->constructor()->IsJSFunction() &&
3062 JSFunction::cast(map->constructor())->shared()->
3063 IsInobjectSlackTrackingInProgress()) {
3064 // We might want to shrink the object later.
3065 ASSERT(obj->GetInternalFieldCount() == 0);
3066 filler = Heap::one_pointer_filler_map();
3067 } else {
3068 filler = Heap::undefined_value();
3069 }
3070 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00003071}
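
// Consequence of the filler choice above: while inobject slack tracking is
// in progress, the trailing fields hold one_pointer_filler_map so the
// instance can later be shrunk; otherwise every field starts out as
// undefined_value, which is what objects created from API functions expect
// for their internal fields.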
3072
3073
John Reck59135872010-11-02 12:39:01 -07003074MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003075 // JSFunctions should be allocated using AllocateFunction to be
3076 // properly initialized.
3077 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3078
Steve Block8defd9f2010-07-08 12:39:36 +01003079 // Both types of global objects should be allocated using
3080 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003081 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3082 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3083
3084 // Allocate the backing storage for the properties.
3085 int prop_size =
3086 map->pre_allocated_property_fields() +
3087 map->unused_property_fields() -
3088 map->inobject_properties();
3089 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003090 Object* properties;
3091 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3092 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3093 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003094
3095 // Allocate the JSObject.
3096 AllocationSpace space =
3097 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3098 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003099 Object* obj;
3100 { MaybeObject* maybe_obj = Allocate(map, space);
3101 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3102 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003103
3104 // Initialize the JSObject.
3105 InitializeJSObjectFromMap(JSObject::cast(obj),
3106 FixedArray::cast(properties),
3107 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003108 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003109 return obj;
3110}
3111
3112
John Reck59135872010-11-02 12:39:01 -07003113MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3114 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003115 // Allocate the initial map if absent.
3116 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003117 Object* initial_map;
3118 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3119 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3120 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003121 constructor->set_initial_map(Map::cast(initial_map));
3122 Map::cast(initial_map)->set_constructor(constructor);
3123 }
3124  // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003125 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003126 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003127#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003128 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003129 Object* non_failure;
3130 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3131#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003132 return result;
3133}
3134
3135
John Reck59135872010-11-02 12:39:01 -07003136MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003137 ASSERT(constructor->has_initial_map());
3138 Map* map = constructor->initial_map();
3139
3140 // Make sure no field properties are described in the initial map.
3141 // This guarantees us that normalizing the properties does not
3142 // require us to change property values to JSGlobalPropertyCells.
3143 ASSERT(map->NextFreePropertyIndex() == 0);
3144
3145 // Make sure we don't have a ton of pre-allocated slots in the
3146 // global objects. They will be unused once we normalize the object.
3147 ASSERT(map->unused_property_fields() == 0);
3148 ASSERT(map->inobject_properties() == 0);
3149
3150 // Initial size of the backing store to avoid resize of the storage during
3151  // Initial size of the backing store to avoid resizing of the storage
3152  // during bootstrapping. The size differs between the JS global object
3153  // and the builtins object.
3154
3155 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003156 Object* obj;
3157 { MaybeObject* maybe_obj =
3158 StringDictionary::Allocate(
3159 map->NumberOfDescribedProperties() * 2 + initial_size);
3160 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3161 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003162 StringDictionary* dictionary = StringDictionary::cast(obj);
3163
3164 // The global object might be created from an object template with accessors.
3165 // Fill these accessors into the dictionary.
3166 DescriptorArray* descs = map->instance_descriptors();
3167 for (int i = 0; i < descs->number_of_descriptors(); i++) {
3168 PropertyDetails details = descs->GetDetails(i);
3169 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3170 PropertyDetails d =
3171 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3172 Object* value = descs->GetCallbacksObject(i);
Steve Block44f0eee2011-05-26 01:26:41 +01003173 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
John Reck59135872010-11-02 12:39:01 -07003174 if (!maybe_value->ToObject(&value)) return maybe_value;
3175 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003176
John Reck59135872010-11-02 12:39:01 -07003177 Object* result;
3178 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3179 if (!maybe_result->ToObject(&result)) return maybe_result;
3180 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003181 dictionary = StringDictionary::cast(result);
3182 }
3183
3184 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003185 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3186 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3187 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003188 JSObject* global = JSObject::cast(obj);
3189 InitializeJSObjectFromMap(global, dictionary, map);
3190
3191 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003192 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3193 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3194 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003195 Map* new_map = Map::cast(obj);
3196
3197 // Setup the global object as a normalized object.
3198  // Set up the global object as a normalized object.
Steve Block44f0eee2011-05-26 01:26:41 +01003199 global->map()->set_instance_descriptors(empty_descriptor_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00003200 global->set_properties(dictionary);
3201
3202 // Make sure result is a global object with properties in dictionary.
3203 ASSERT(global->IsGlobalObject());
3204 ASSERT(!global->HasFastProperties());
3205 return global;
3206}
3207
3208
John Reck59135872010-11-02 12:39:01 -07003209MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003210 // Never used to copy functions. If functions need to be copied we
3211 // have to be careful to clear the literals array.
3212 ASSERT(!source->IsJSFunction());
3213
3214 // Make the clone.
3215 Map* map = source->map();
3216 int object_size = map->instance_size();
3217 Object* clone;
3218
3219 // If we're forced to always allocate, we use the general allocation
3220 // functions which may leave us with an object in old space.
3221 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003222 { MaybeObject* maybe_clone =
3223 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3224 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3225 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003226 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003227 CopyBlock(clone_address,
3228 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003229 object_size);
3230 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003231 RecordWrites(clone_address,
3232 JSObject::kHeaderSize,
3233 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003234 } else {
John Reck59135872010-11-02 12:39:01 -07003235 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3236 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3237 }
Steve Block44f0eee2011-05-26 01:26:41 +01003238 ASSERT(InNewSpace(clone));
Steve Blocka7e24c12009-10-30 11:49:00 +00003239 // Since we know the clone is allocated in new space, we can copy
3240 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003241 CopyBlock(HeapObject::cast(clone)->address(),
3242 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003243 object_size);
3244 }
3245
3246 FixedArray* elements = FixedArray::cast(source->elements());
3247 FixedArray* properties = FixedArray::cast(source->properties());
3248 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003249 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003250 Object* elem;
3251 { MaybeObject* maybe_elem =
3252 (elements->map() == fixed_cow_array_map()) ?
3253 elements : CopyFixedArray(elements);
3254 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3255 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003256 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3257 }
3258 // Update properties if necessary.
3259 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003260 Object* prop;
3261 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3262 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3263 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003264 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3265 }
3266 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003267#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01003268 isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
Steve Block3ce2e202009-11-05 08:53:23 +00003269#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003270 return clone;
3271}
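
// Illustrative usage sketch, assuming an initialized Heap* and a plain
// (non-function) JSObject; the helper name is hypothetical. New-space
// clones skip the write barrier entirely, while always_allocate() clones
// may land in old space and therefore record writes for every field beyond
// the header.
MUST_USE_RESULT static MaybeObject* CloneExample(Heap* heap,
                                                 JSObject* source) {
  ASSERT(!source->IsJSFunction());  // Functions would need literals cleared.
  return heap->CopyJSObject(source);
}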
3272
3273
John Reck59135872010-11-02 12:39:01 -07003274MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3275 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003276 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003277 Map* map = constructor->initial_map();
3278
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003279 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003280 // objects allocated using the constructor.
3281 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003282 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003283
3284 // Allocate the backing storage for the properties.
3285 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003286 Object* properties;
3287 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3288 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3289 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003290
3291 // Reset the map for the object.
3292 object->set_map(constructor->initial_map());
3293
3294 // Reinitialize the object from the constructor map.
3295 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3296 return object;
3297}
3298
3299
John Reck59135872010-11-02 12:39:01 -07003300MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3301 PretenureFlag pretenure) {
3302 Object* result;
3303 { MaybeObject* maybe_result =
3304 AllocateRawAsciiString(string.length(), pretenure);
3305 if (!maybe_result->ToObject(&result)) return maybe_result;
3306 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003307
3308 // Copy the characters into the new object.
3309 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3310 for (int i = 0; i < string.length(); i++) {
3311 string_result->SeqAsciiStringSet(i, string[i]);
3312 }
3313 return result;
3314}
3315
3316
Steve Block9fac8402011-05-12 15:51:54 +01003317MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3318 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003319 // V8 only supports characters in the Basic Multilingual Plane.
3320 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003321  // Count the number of characters in the UTF-8 string. The all-ASCII
3322  // fast case is handled by the caller, so only the count matters here.
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003323 Access<ScannerConstants::Utf8Decoder>
Steve Block44f0eee2011-05-26 01:26:41 +01003324 decoder(isolate_->scanner_constants()->utf8_decoder());
Steve Blocka7e24c12009-10-30 11:49:00 +00003325 decoder->Reset(string.start(), string.length());
3326 int chars = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003327 while (decoder->has_more()) {
Steve Block9fac8402011-05-12 15:51:54 +01003328 decoder->GetNext();
Steve Blocka7e24c12009-10-30 11:49:00 +00003329 chars++;
3330 }
3331
John Reck59135872010-11-02 12:39:01 -07003332 Object* result;
3333 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3334 if (!maybe_result->ToObject(&result)) return maybe_result;
3335 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003336
3337 // Convert and copy the characters into the new object.
3338 String* string_result = String::cast(result);
3339 decoder->Reset(string.start(), string.length());
3340 for (int i = 0; i < chars; i++) {
3341 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003342 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003343 string_result->Set(i, r);
3344 }
3345 return result;
3346}
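
// Illustrative example of the BMP restriction above: a four-byte UTF-8
// sequence such as 0xF0 0x9F 0x98 0x80 (U+1F600) decodes to a code point
// above 0xFFFF and is therefore stored as unibrow::Utf8::kBadChar, while
// every character inside the Basic Multilingual Plane round-trips unchanged.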
3347
3348
John Reck59135872010-11-02 12:39:01 -07003349MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3350 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003351 // Check if the string is an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003352 MaybeObject* maybe_result;
Steve Block9fac8402011-05-12 15:51:54 +01003353 if (String::IsAscii(string.start(), string.length())) {
John Reck59135872010-11-02 12:39:01 -07003354 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003355 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003356 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003357 }
John Reck59135872010-11-02 12:39:01 -07003358 Object* result;
3359 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003360
3361 // Copy the characters into the new object, which may be either ASCII or
3362 // UTF-16.
3363 String* string_result = String::cast(result);
3364 for (int i = 0; i < string.length(); i++) {
3365 string_result->Set(i, string[i]);
3366 }
3367 return result;
3368}
3369
3370
3371Map* Heap::SymbolMapForString(String* string) {
3372 // If the string is in new space it cannot be used as a symbol.
3373 if (InNewSpace(string)) return NULL;
3374
3375 // Find the corresponding symbol map for strings.
3376 Map* map = string->map();
Steve Block44f0eee2011-05-26 01:26:41 +01003377 if (map == ascii_string_map()) {
3378 return ascii_symbol_map();
3379 }
3380 if (map == string_map()) {
3381 return symbol_map();
3382 }
3383 if (map == cons_string_map()) {
3384 return cons_symbol_map();
3385 }
3386 if (map == cons_ascii_string_map()) {
3387 return cons_ascii_symbol_map();
3388 }
3389 if (map == external_string_map()) {
3390 return external_symbol_map();
3391 }
3392 if (map == external_ascii_string_map()) {
3393 return external_ascii_symbol_map();
3394 }
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003395 if (map == external_string_with_ascii_data_map()) {
3396 return external_symbol_with_ascii_data_map();
3397 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003398
3399 // No match found.
3400 return NULL;
3401}
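
// Summary of the mapping above (string map -> symbol map):
//   ascii_string_map                    -> ascii_symbol_map
//   string_map                          -> symbol_map
//   cons_string_map                     -> cons_symbol_map
//   cons_ascii_string_map               -> cons_ascii_symbol_map
//   external_string_map                 -> external_symbol_map
//   external_ascii_string_map           -> external_ascii_symbol_map
//   external_string_with_ascii_data_map -> external_symbol_with_ascii_data_map
// New-space strings and unrecognized maps yield NULL, signalling that the
// string cannot be turned into a symbol in place.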
3402
3403
John Reck59135872010-11-02 12:39:01 -07003404MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3405 int chars,
3406 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003407 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003408  // Ensure that chars matches the number of characters in the buffer.
3409 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3410 // Determine whether the string is ascii.
3411  // Determine whether the string is ASCII.
Leon Clarkee46be812010-01-19 14:06:41 +00003412 while (buffer->has_more()) {
3413 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3414 is_ascii = false;
3415 break;
3416 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003417 }
3418 buffer->Rewind();
3419
3420 // Compute map and object size.
3421 int size;
3422 Map* map;
3423
3424 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003425 if (chars > SeqAsciiString::kMaxLength) {
3426 return Failure::OutOfMemoryException();
3427 }
Steve Blockd0582a62009-12-15 09:54:21 +00003428 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003429 size = SeqAsciiString::SizeFor(chars);
3430 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003431 if (chars > SeqTwoByteString::kMaxLength) {
3432 return Failure::OutOfMemoryException();
3433 }
Steve Blockd0582a62009-12-15 09:54:21 +00003434 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003435 size = SeqTwoByteString::SizeFor(chars);
3436 }
3437
3438 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003439 Object* result;
3440 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3441 ? lo_space_->AllocateRaw(size)
3442 : old_data_space_->AllocateRaw(size);
3443 if (!maybe_result->ToObject(&result)) return maybe_result;
3444 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003445
3446 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003447 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003448 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003449 answer->set_length(chars);
3450 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003451
3452 ASSERT_EQ(size, answer->Size());
3453
3454 // Fill in the characters.
3455 for (int i = 0; i < chars; i++) {
3456 answer->Set(i, buffer->GetNext());
3457 }
3458 return answer;
3459}
3460
3461
John Reck59135872010-11-02 12:39:01 -07003462MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003463 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3464 return Failure::OutOfMemoryException();
3465 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003466
3467 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003468 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003469
Leon Clarkee46be812010-01-19 14:06:41 +00003470 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3471 AllocationSpace retry_space = OLD_DATA_SPACE;
3472
Steve Blocka7e24c12009-10-30 11:49:00 +00003473 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003474 if (size > kMaxObjectSizeInNewSpace) {
3475      // Allocate in large object space; the retry space will be ignored.
3476 space = LO_SPACE;
3477 } else if (size > MaxObjectSizeInPagedSpace()) {
3478 // Allocate in new space, retry in large object space.
3479 retry_space = LO_SPACE;
3480 }
3481 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3482 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003483 }
John Reck59135872010-11-02 12:39:01 -07003484 Object* result;
3485 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3486 if (!maybe_result->ToObject(&result)) return maybe_result;
3487 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003488
Steve Blocka7e24c12009-10-30 11:49:00 +00003489 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003490 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003491 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003492 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003493 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3494 return result;
3495}
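
// Decision table for the space selection above (the same logic appears in
// AllocateRawTwoByteString below):
//   TENURED,     size <= MaxObjectSizeInPagedSpace() -> OLD_DATA_SPACE
//   TENURED,     larger sizes                        -> LO_SPACE
//   NOT_TENURED, size <= kMaxObjectSizeInNewSpace    -> NEW_SPACE, with a
//                retry in LO_SPACE if size > MaxObjectSizeInPagedSpace()
//   NOT_TENURED, size >  kMaxObjectSizeInNewSpace    -> LO_SPACE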
3496
3497
John Reck59135872010-11-02 12:39:01 -07003498MaybeObject* Heap::AllocateRawTwoByteString(int length,
3499 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003500 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3501 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003502 }
Leon Clarkee46be812010-01-19 14:06:41 +00003503 int size = SeqTwoByteString::SizeFor(length);
3504 ASSERT(size <= SeqTwoByteString::kMaxSize);
3505 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3506 AllocationSpace retry_space = OLD_DATA_SPACE;
3507
3508 if (space == NEW_SPACE) {
3509 if (size > kMaxObjectSizeInNewSpace) {
3510 // Allocate in large object space; the retry space will be ignored.
3511 space = LO_SPACE;
3512 } else if (size > MaxObjectSizeInPagedSpace()) {
3513 // Allocate in new space, retry in large object space.
3514 retry_space = LO_SPACE;
3515 }
3516 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3517 space = LO_SPACE;
3518 }
John Reck59135872010-11-02 12:39:01 -07003519 Object* result;
3520 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3521 if (!maybe_result->ToObject(&result)) return maybe_result;
3522 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003523
Steve Blocka7e24c12009-10-30 11:49:00 +00003524 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003525 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003526 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003527 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003528 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3529 return result;
3530}
3531
3532
John Reck59135872010-11-02 12:39:01 -07003533MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003534 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003535 Object* result;
3536 { MaybeObject* maybe_result =
3537 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3538 if (!maybe_result->ToObject(&result)) return maybe_result;
3539 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003540 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003541 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3542 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003543 return result;
3544}
3545
3546
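// Returns the raw, uninitialized backing store for a fixed array; the
// caller is expected to install the map and length. New space is
// preferred, with arrays that do not fit going to large object space
// (or, under always_allocate(), through the general tenured path).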
John Reck59135872010-11-02 12:39:01 -07003547MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003548 if (length < 0 || length > FixedArray::kMaxLength) {
3549 return Failure::OutOfMemoryException();
3550 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003551 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003552 // Use the general function if we're forced to always allocate.
3553 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3554 // Allocate the raw data for a fixed array.
3555 int size = FixedArray::SizeFor(length);
3556 return size <= kMaxObjectSizeInNewSpace
3557 ? new_space_.AllocateRaw(size)
3558 : lo_space_->AllocateRawFixedArray(size);
3559}
3560
3561
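// While the copy is still in new space its body can be moved with
// CopyBlock and no write barrier; once the allocation lands in old
// space the elements are copied one by one with the write barrier
// mode obtained below.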
John Reck59135872010-11-02 12:39:01 -07003562MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003563 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003564 Object* obj;
3565 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3566 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3567 }
Steve Block44f0eee2011-05-26 01:26:41 +01003568 if (InNewSpace(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003569 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003570 dst->set_map(map);
3571 CopyBlock(dst->address() + kPointerSize,
3572 src->address() + kPointerSize,
3573 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003574 return obj;
3575 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003576 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003577 FixedArray* result = FixedArray::cast(obj);
3578 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003579
Steve Blocka7e24c12009-10-30 11:49:00 +00003580 // Copy the contents.
Leon Clarke4515c472010-02-03 11:58:03 +00003581 AssertNoAllocation no_gc;
3582 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003583 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3584 return result;
3585}
3586
3587
John Reck59135872010-11-02 12:39:01 -07003588MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003589 ASSERT(length >= 0);
3590 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003591 Object* result;
3592 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3593 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003594 }
John Reck59135872010-11-02 12:39:01 -07003595 // Initialize header.
3596 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3597 array->set_map(fixed_array_map());
3598 array->set_length(length);
3599 // Initialize body.
Steve Block44f0eee2011-05-26 01:26:41 +01003600 ASSERT(!InNewSpace(undefined_value()));
John Reck59135872010-11-02 12:39:01 -07003601 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003602 return result;
3603}
3604
3605
John Reck59135872010-11-02 12:39:01 -07003606MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003607 if (length < 0 || length > FixedArray::kMaxLength) {
3608 return Failure::OutOfMemoryException();
3609 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003610
Leon Clarkee46be812010-01-19 14:06:41 +00003611 AllocationSpace space =
3612 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003613 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003614 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3615 // Too big for new space.
3616 space = LO_SPACE;
3617 } else if (space == OLD_POINTER_SPACE &&
3618 size > MaxObjectSizeInPagedSpace()) {
3619 // Too big for old pointer space.
3620 space = LO_SPACE;
3621 }
3622
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003623 AllocationSpace retry_space =
3624 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3625
3626 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003627}
3628
3629
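// The filler is required to live outside new space (see the ASSERT
// below) so that the body can be initialized with MemsetPointer
// without any write barrier updates.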
John Reck59135872010-11-02 12:39:01 -07003630MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
Steve Block44f0eee2011-05-26 01:26:41 +01003631 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07003632 int length,
3633 PretenureFlag pretenure,
3634 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003635 ASSERT(length >= 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003636 ASSERT(heap->empty_fixed_array()->IsFixedArray());
3637 if (length == 0) return heap->empty_fixed_array();
Steve Block6ded16b2010-05-10 14:33:55 +01003638
Steve Block44f0eee2011-05-26 01:26:41 +01003639 ASSERT(!heap->InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003640 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003641 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003642 if (!maybe_result->ToObject(&result)) return maybe_result;
3643 }
Steve Block6ded16b2010-05-10 14:33:55 +01003644
Steve Block44f0eee2011-05-26 01:26:41 +01003645 HeapObject::cast(result)->set_map(heap->fixed_array_map());
Steve Block6ded16b2010-05-10 14:33:55 +01003646 FixedArray* array = FixedArray::cast(result);
3647 array->set_length(length);
3648 MemsetPointer(array->data_start(), filler, length);
3649 return array;
3650}
3651
3652
John Reck59135872010-11-02 12:39:01 -07003653MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003654 return AllocateFixedArrayWithFiller(this,
3655 length,
3656 pretenure,
3657 undefined_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003658}
3659
3660
John Reck59135872010-11-02 12:39:01 -07003661MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3662 PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003663 return AllocateFixedArrayWithFiller(this,
3664 length,
3665 pretenure,
3666 the_hole_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003667}
3668
3669
John Reck59135872010-11-02 12:39:01 -07003670MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003671 if (length == 0) return empty_fixed_array();
3672
John Reck59135872010-11-02 12:39:01 -07003673 Object* obj;
3674 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3675 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3676 }
Steve Block6ded16b2010-05-10 14:33:55 +01003677
3678 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3679 FixedArray::cast(obj)->set_length(length);
3680 return obj;
3681}
3682
3683
John Reck59135872010-11-02 12:39:01 -07003684MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3685 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003686 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003687 if (!maybe_result->ToObject(&result)) return maybe_result;
3688 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003689 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003690 ASSERT(result->IsHashTable());
3691 return result;
3692}
3693
3694
John Reck59135872010-11-02 12:39:01 -07003695MaybeObject* Heap::AllocateGlobalContext() {
3696 Object* result;
3697 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01003698 AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003699 if (!maybe_result->ToObject(&result)) return maybe_result;
3700 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003701 Context* context = reinterpret_cast<Context*>(result);
3702 context->set_map(global_context_map());
3703 ASSERT(context->IsGlobalContext());
3704 ASSERT(result->IsContext());
3705 return result;
3706}
3707
3708
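// A function context is its own fcontext; the with/catch contexts
// allocated below instead share the fcontext of the enclosing
// function context.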
John Reck59135872010-11-02 12:39:01 -07003709MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003710 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003711 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003712 { MaybeObject* maybe_result = AllocateFixedArray(length);
John Reck59135872010-11-02 12:39:01 -07003713 if (!maybe_result->ToObject(&result)) return maybe_result;
3714 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003715 Context* context = reinterpret_cast<Context*>(result);
3716 context->set_map(context_map());
3717 context->set_closure(function);
3718 context->set_fcontext(context);
3719 context->set_previous(NULL);
3720 context->set_extension(NULL);
3721 context->set_global(function->context()->global());
3722 ASSERT(!context->IsGlobalContext());
3723 ASSERT(context->is_function_context());
3724 ASSERT(result->IsContext());
3725 return result;
3726}
3727
3728
John Reck59135872010-11-02 12:39:01 -07003729MaybeObject* Heap::AllocateWithContext(Context* previous,
3730 JSObject* extension,
3731 bool is_catch_context) {
3732 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003733 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003734 if (!maybe_result->ToObject(&result)) return maybe_result;
3735 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003736 Context* context = reinterpret_cast<Context*>(result);
Steve Block44f0eee2011-05-26 01:26:41 +01003737 context->set_map(is_catch_context ? catch_context_map() :
3738 context_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003739 context->set_closure(previous->closure());
3740 context->set_fcontext(previous->fcontext());
3741 context->set_previous(previous);
3742 context->set_extension(extension);
3743 context->set_global(previous->global());
3744 ASSERT(!context->IsGlobalContext());
3745 ASSERT(!context->is_function_context());
3746 ASSERT(result->IsContext());
3747 return result;
3748}
3749
3750
John Reck59135872010-11-02 12:39:01 -07003751MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003752 Map* map;
3753 switch (type) {
Steve Block44f0eee2011-05-26 01:26:41 +01003754#define MAKE_CASE(NAME, Name, name) \
3755 case NAME##_TYPE: map = name##_map(); break;
Steve Blocka7e24c12009-10-30 11:49:00 +00003756STRUCT_LIST(MAKE_CASE)
3757#undef MAKE_CASE
3758 default:
3759 UNREACHABLE();
3760 return Failure::InternalError();
3761 }
3762 int size = map->instance_size();
3763 AllocationSpace space =
3764 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003765 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003766 { MaybeObject* maybe_result = Allocate(map, space);
John Reck59135872010-11-02 12:39:01 -07003767 if (!maybe_result->ToObject(&result)) return maybe_result;
3768 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003769 Struct::cast(result)->InitializeBody(size);
3770 return result;
3771}
3772
3773
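// Idle notifications drive an escalating cleanup ladder: after
// kIdlesBeforeScavenge consecutive idle rounds new space is scavenged,
// after kIdlesBeforeMarkSweep the compilation cache is cleared and a
// mark-sweep runs, and after kIdlesBeforeMarkCompact a full compacting
// collection finishes the cycle. Returns true when no further idle
// cleanup is expected to help.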
3774bool Heap::IdleNotification() {
3775 static const int kIdlesBeforeScavenge = 4;
3776 static const int kIdlesBeforeMarkSweep = 7;
3777 static const int kIdlesBeforeMarkCompact = 8;
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003778 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
Ben Murdoche0cee9b2011-05-25 10:26:03 +01003779 static const unsigned int kGCsBetweenCleanup = 4;
Steve Block44f0eee2011-05-26 01:26:41 +01003780
3781 if (!last_idle_notification_gc_count_init_) {
3782 last_idle_notification_gc_count_ = gc_count_;
3783 last_idle_notification_gc_count_init_ = true;
3784 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003785
Steve Block6ded16b2010-05-10 14:33:55 +01003786 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003787 bool finished = false;
3788
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003789 // Reset the number of idle notifications received when a number of
3790 // GCs have taken place. This allows another round of cleanup based
3791 // on idle notifications if enough work has been carried out to
3792 // provoke a number of garbage collections.
Steve Block44f0eee2011-05-26 01:26:41 +01003793 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
3794 number_idle_notifications_ =
3795 Min(number_idle_notifications_ + 1, kMaxIdleCount);
Steve Blocka7e24c12009-10-30 11:49:00 +00003796 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003797 number_idle_notifications_ = 0;
3798 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003799 }
3800
Steve Block44f0eee2011-05-26 01:26:41 +01003801 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003802 if (contexts_disposed_ > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01003803 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003804 CollectAllGarbage(false);
3805 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003806 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003807 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003808 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003809 last_idle_notification_gc_count_ = gc_count_;
3810 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003811 // Before doing the mark-sweep collections we clear the
3812 // compilation cache to avoid hanging on to source code and
3813 // generated code for cached functions.
Steve Block44f0eee2011-05-26 01:26:41 +01003814 isolate_->compilation_cache()->Clear();
Steve Blockd0582a62009-12-15 09:54:21 +00003815
Steve Blocka7e24c12009-10-30 11:49:00 +00003816 CollectAllGarbage(false);
3817 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003818 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003819
Steve Block44f0eee2011-05-26 01:26:41 +01003820 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003821 CollectAllGarbage(true);
3822 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003823 last_idle_notification_gc_count_ = gc_count_;
3824 number_idle_notifications_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003825 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003826 } else if (contexts_disposed_ > 0) {
3827 if (FLAG_expose_gc) {
3828 contexts_disposed_ = 0;
3829 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003830 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003831 CollectAllGarbage(false);
Steve Block44f0eee2011-05-26 01:26:41 +01003832 last_idle_notification_gc_count_ = gc_count_;
Steve Block6ded16b2010-05-10 14:33:55 +01003833 }
3834 // If this is among the first idle notifications, reset the
3835 // notification count to keep idle notifications triggered by
3836 // context disposal garbage collections from starting an overly
3837 // aggressive idle GC cycle.
Steve Block44f0eee2011-05-26 01:26:41 +01003838 if (number_idle_notifications_ <= 1) {
3839 number_idle_notifications_ = 0;
Steve Block6ded16b2010-05-10 14:33:55 +01003840 uncommit = false;
3841 }
Steve Block44f0eee2011-05-26 01:26:41 +01003842 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003843 // If we have received more than kIdlesBeforeMarkCompact idle
3844 // notifications we do not perform any cleanup because we don't
3845 // expect to gain much by doing so.
3846 finished = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003847 }
3848
Steve Block6ded16b2010-05-10 14:33:55 +01003849 // Make sure that we have no pending context disposals and
3850 // conditionally uncommit from space.
3851 ASSERT(contexts_disposed_ == 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003852 if (uncommit) UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003853 return finished;
3854}
3855
3856
3857#ifdef DEBUG
3858
3859void Heap::Print() {
3860 if (!HasBeenSetup()) return;
Steve Block44f0eee2011-05-26 01:26:41 +01003861 isolate()->PrintStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00003862 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003863 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3864 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003865}
3866
3867
3868void Heap::ReportCodeStatistics(const char* title) {
3869 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3870 PagedSpace::ResetCodeStatistics();
3871 // We do not look for code in new space, map space, or old space. If code
3872 // somehow ends up in those spaces, we would miss it here.
3873 code_space_->CollectCodeStatistics();
3874 lo_space_->CollectCodeStatistics();
3875 PagedSpace::ReportCodeStatistics();
3876}
3877
3878
3879// This function expects that NewSpace's allocated objects histogram is
3880// populated (via a call to CollectStatistics or else as a side effect of a
3881// just-completed scavenge collection).
3882void Heap::ReportHeapStatistics(const char* title) {
3883 USE(title);
3884 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3885 title, gc_count_);
3886 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003887 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3888 old_gen_promotion_limit_);
3889 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3890 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003891
3892 PrintF("\n");
3893 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
Steve Block44f0eee2011-05-26 01:26:41 +01003894 isolate_->global_handles()->PrintStats();
Steve Blocka7e24c12009-10-30 11:49:00 +00003895 PrintF("\n");
3896
3897 PrintF("Heap statistics : ");
Steve Block44f0eee2011-05-26 01:26:41 +01003898 isolate_->memory_allocator()->ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00003899 PrintF("To space : ");
3900 new_space_.ReportStatistics();
3901 PrintF("Old pointer space : ");
3902 old_pointer_space_->ReportStatistics();
3903 PrintF("Old data space : ");
3904 old_data_space_->ReportStatistics();
3905 PrintF("Code space : ");
3906 code_space_->ReportStatistics();
3907 PrintF("Map space : ");
3908 map_space_->ReportStatistics();
3909 PrintF("Cell space : ");
3910 cell_space_->ReportStatistics();
3911 PrintF("Large object space : ");
3912 lo_space_->ReportStatistics();
3913 PrintF(">>>>>> ========================================= >>>>>>\n");
3914}
3915
3916#endif // DEBUG
3917
3918bool Heap::Contains(HeapObject* value) {
3919 return Contains(value->address());
3920}
3921
3922
3923bool Heap::Contains(Address addr) {
3924 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3925 return HasBeenSetup() &&
3926 (new_space_.ToSpaceContains(addr) ||
3927 old_pointer_space_->Contains(addr) ||
3928 old_data_space_->Contains(addr) ||
3929 code_space_->Contains(addr) ||
3930 map_space_->Contains(addr) ||
3931 cell_space_->Contains(addr) ||
3932 lo_space_->SlowContains(addr));
3933}
3934
3935
3936bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3937 return InSpace(value->address(), space);
3938}
3939
3940
3941bool Heap::InSpace(Address addr, AllocationSpace space) {
3942 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3943 if (!HasBeenSetup()) return false;
3944
3945 switch (space) {
3946 case NEW_SPACE:
3947 return new_space_.ToSpaceContains(addr);
3948 case OLD_POINTER_SPACE:
3949 return old_pointer_space_->Contains(addr);
3950 case OLD_DATA_SPACE:
3951 return old_data_space_->Contains(addr);
3952 case CODE_SPACE:
3953 return code_space_->Contains(addr);
3954 case MAP_SPACE:
3955 return map_space_->Contains(addr);
3956 case CELL_SPACE:
3957 return cell_space_->Contains(addr);
3958 case LO_SPACE:
3959 return lo_space_->SlowContains(addr);
3960 }
3961
3962 return false;
3963}
3964
3965
3966#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003967static void DummyScavengePointer(HeapObject** p) {
3968}
3969
3970
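// Replays the dirty-region visitors over everything below each page's
// allocation watermark with a no-op slot callback; the verification
// itself happens in the ASSERTs inside the visitors.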
3971static void VerifyPointersUnderWatermark(
3972 PagedSpace* space,
3973 DirtyRegionCallback visit_dirty_region) {
3974 PageIterator it(space, PageIterator::PAGES_IN_USE);
3975
3976 while (it.has_next()) {
3977 Page* page = it.next();
3978 Address start = page->ObjectAreaStart();
3979 Address end = page->AllocationWatermark();
3980
Steve Block44f0eee2011-05-26 01:26:41 +01003981 HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003982 start,
3983 end,
3984 visit_dirty_region,
3985 &DummyScavengePointer);
3986 }
3987}
3988
3989
3990static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3991 LargeObjectIterator it(space);
3992 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3993 if (object->IsFixedArray()) {
3994 Address slot_address = object->address();
3995 Address end = object->address() + object->Size();
3996
3997 while (slot_address < end) {
3998 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
3999 // When we are not in GC, the Heap::InNewSpace() predicate
4000 // checks that any pointer satisfying it points into
4001 // the active semispace.
Steve Block44f0eee2011-05-26 01:26:41 +01004002 HEAP->InNewSpace(*slot);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004003 slot_address += kPointerSize;
4004 }
4005 }
4006 }
4007}
4008
4009
Steve Blocka7e24c12009-10-30 11:49:00 +00004010void Heap::Verify() {
4011 ASSERT(HasBeenSetup());
4012
4013 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00004014 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00004015
4016 new_space_.Verify();
4017
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004018 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
4019 old_pointer_space_->Verify(&dirty_regions_visitor);
4020 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004021
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004022 VerifyPointersUnderWatermark(old_pointer_space_,
4023 &IteratePointersInDirtyRegion);
4024 VerifyPointersUnderWatermark(map_space_,
4025 &IteratePointersInDirtyMapsRegion);
4026 VerifyPointersUnderWatermark(lo_space_);
4027
4028 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4029 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4030
4031 VerifyPointersVisitor no_dirty_regions_visitor;
4032 old_data_space_->Verify(&no_dirty_regions_visitor);
4033 code_space_->Verify(&no_dirty_regions_visitor);
4034 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004035
4036 lo_space_->Verify();
4037}
4038#endif // DEBUG
4039
4040
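// Interning may grow the symbol table, so on success the (possibly
// reallocated) table is reinstalled in the root list before the
// symbol is returned.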
John Reck59135872010-11-02 12:39:01 -07004041MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004042 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004043 Object* new_table;
4044 { MaybeObject* maybe_new_table =
4045 symbol_table()->LookupSymbol(string, &symbol);
4046 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4047 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004048 // Can't use set_symbol_table because SymbolTable::cast knows that
4049 // SymbolTable is a singleton and checks for identity.
4050 roots_[kSymbolTableRootIndex] = new_table;
4051 ASSERT(symbol != NULL);
4052 return symbol;
4053}
4054
4055
Steve Block9fac8402011-05-12 15:51:54 +01004056MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4057 Object* symbol = NULL;
4058 Object* new_table;
4059 { MaybeObject* maybe_new_table =
4060 symbol_table()->LookupAsciiSymbol(string, &symbol);
4061 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4062 }
4063 // Can't use set_symbol_table because SymbolTable::cast knows that
4064 // SymbolTable is a singleton and checks for identity.
4065 roots_[kSymbolTableRootIndex] = new_table;
4066 ASSERT(symbol != NULL);
4067 return symbol;
4068}
4069
4070
4071MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4072 Object* symbol = NULL;
4073 Object* new_table;
4074 { MaybeObject* maybe_new_table =
4075 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4076 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4077 }
4078 // Can't use set_symbol_table because SymbolTable::cast knows that
4079 // SymbolTable is a singleton and checks for identity.
4080 roots_[kSymbolTableRootIndex] = new_table;
4081 ASSERT(symbol != NULL);
4082 return symbol;
4083}
4084
4085
John Reck59135872010-11-02 12:39:01 -07004086MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004087 if (string->IsSymbol()) return string;
4088 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004089 Object* new_table;
4090 { MaybeObject* maybe_new_table =
4091 symbol_table()->LookupString(string, &symbol);
4092 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4093 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004094 // Can't use set_symbol_table because SymbolTable::cast knows that
4095 // SymbolTable is a singleton and checks for identity.
4096 roots_[kSymbolTableRootIndex] = new_table;
4097 ASSERT(symbol != NULL);
4098 return symbol;
4099}
4100
4101
4102bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4103 if (string->IsSymbol()) {
4104 *symbol = string;
4105 return true;
4106 }
4107 return symbol_table()->LookupSymbolIfExists(string, symbol);
4108}
4109
4110
4111#ifdef DEBUG
4112void Heap::ZapFromSpace() {
Steve Block1e0659c2011-05-24 12:43:12 +01004113 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
Steve Blocka7e24c12009-10-30 11:49:00 +00004114 for (Address a = new_space_.FromSpaceLow();
4115 a < new_space_.FromSpaceHigh();
4116 a += kPointerSize) {
4117 Memory::Address_at(a) = kFromSpaceZapValue;
4118 }
4119}
4120#endif // DEBUG
4121
4122
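// Visits every pointer-sized slot in [start, end), passing slots that
// point into new space to copy_object_func. Returns true if the region
// still references new space afterwards, i.e. if it must remain dirty.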
Steve Block44f0eee2011-05-26 01:26:41 +01004123bool Heap::IteratePointersInDirtyRegion(Heap* heap,
4124 Address start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004125 Address end,
4126 ObjectSlotCallback copy_object_func) {
4127 Address slot_address = start;
4128 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004129
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004130 while (slot_address < end) {
4131 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004132 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004133 ASSERT((*slot)->IsHeapObject());
4134 copy_object_func(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004135 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004136 ASSERT((*slot)->IsHeapObject());
4137 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004138 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004139 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004140 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004141 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004142 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004143}
4144
4145
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004146// Compute the start address of the first map following the given addr.
4147static inline Address MapStartAlign(Address addr) {
4148 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4149 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4150}
Steve Blocka7e24c12009-10-30 11:49:00 +00004151
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004152
4153// Compute the end address of the first map preceding the given addr.
4154static inline Address MapEndAlign(Address addr) {
4155 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4156 return page + ((addr - page) / Map::kSize * Map::kSize);
4157}
4158
4159
4160static bool IteratePointersInDirtyMaps(Address start,
4161 Address end,
4162 ObjectSlotCallback copy_object_func) {
4163 ASSERT(MapStartAlign(start) == start);
4164 ASSERT(MapEndAlign(end) == end);
4165
4166 Address map_address = start;
4167 bool pointers_to_new_space_found = false;
4168
Steve Block44f0eee2011-05-26 01:26:41 +01004169 Heap* heap = HEAP;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004170 while (map_address < end) {
Steve Block44f0eee2011-05-26 01:26:41 +01004171 ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004172 ASSERT(Memory::Object_at(map_address)->IsMap());
4173
4174 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4175 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4176
Steve Block44f0eee2011-05-26 01:26:41 +01004177 if (Heap::IteratePointersInDirtyRegion(heap,
4178 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004179 pointer_fields_end,
4180 copy_object_func)) {
4181 pointers_to_new_space_found = true;
4182 }
4183
4184 map_address += Map::kSize;
4185 }
4186
4187 return pointers_to_new_space_found;
4188}
4189
4190
4191bool Heap::IteratePointersInDirtyMapsRegion(
Steve Block44f0eee2011-05-26 01:26:41 +01004192 Heap* heap,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004193 Address start,
4194 Address end,
4195 ObjectSlotCallback copy_object_func) {
4196 Address map_aligned_start = MapStartAlign(start);
4197 Address map_aligned_end = MapEndAlign(end);
4198
4199 bool contains_pointers_to_new_space = false;
4200
4201 if (map_aligned_start != start) {
4202 Address prev_map = map_aligned_start - Map::kSize;
4203 ASSERT(Memory::Object_at(prev_map)->IsMap());
4204
4205 Address pointer_fields_start =
4206 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4207
4208 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004209 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004210
4211 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004212 IteratePointersInDirtyRegion(heap,
4213 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004214 pointer_fields_end,
4215 copy_object_func)
4216 || contains_pointers_to_new_space;
4217 }
4218
4219 contains_pointers_to_new_space =
4220 IteratePointersInDirtyMaps(map_aligned_start,
4221 map_aligned_end,
4222 copy_object_func)
4223 || contains_pointers_to_new_space;
4224
4225 if (map_aligned_end != end) {
4226 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4227
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004228 Address pointer_fields_start =
4229 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004230
4231 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004232 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004233
4234 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004235 IteratePointersInDirtyRegion(heap,
4236 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004237 pointer_fields_end,
4238 copy_object_func)
4239 || contains_pointers_to_new_space;
4240 }
4241
4242 return contains_pointers_to_new_space;
4243}
4244
4245
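// Scavenge-time variant of the dirty-region walk: only slots pointing
// into from-space are forwarded, and the page's region marks are
// updated in place for slots that still point into new space.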
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004246void Heap::IterateAndMarkPointersToFromSpace(Address start,
4247 Address end,
4248 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004249 Address slot_address = start;
4250 Page* page = Page::FromAddress(start);
4251
4252 uint32_t marks = page->GetRegionMarks();
4253
4254 while (slot_address < end) {
4255 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004256 if (InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004257 ASSERT((*slot)->IsHeapObject());
4258 callback(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004259 if (InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004260 ASSERT((*slot)->IsHeapObject());
4261 marks |= page->GetRegionMaskForAddress(slot_address);
4262 }
4263 }
4264 slot_address += kPointerSize;
4265 }
4266
4267 page->SetRegionMarks(marks);
4268}
4269
4270
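// Each page keeps one dirty bit per Page::kRegionSize-sized region,
// packed into a single uint32_t mark word. This walks the regions
// overlapping [area_start, area_end), visits only the dirty ones, and
// returns a new mark word in which a bit survives only if its region
// still holds pointers into new space.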
4271uint32_t Heap::IterateDirtyRegions(
4272 uint32_t marks,
4273 Address area_start,
4274 Address area_end,
4275 DirtyRegionCallback visit_dirty_region,
4276 ObjectSlotCallback copy_object_func) {
4277 uint32_t newmarks = 0;
4278 uint32_t mask = 1;
4279
4280 if (area_start >= area_end) {
4281 return newmarks;
4282 }
4283
4284 Address region_start = area_start;
4285
4286 // area_start does not necessarily coincide with the start of the first
4287 // region. Thus, to calculate the beginning of the next region, we align
4288 // area_start to Page::kRegionSize.
4289 Address second_region =
4290 reinterpret_cast<Address>(
4291 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4292 ~Page::kRegionAlignmentMask);
4293
4294 // Next region might be beyond area_end.
4295 Address region_end = Min(second_region, area_end);
4296
4297 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004298 if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004299 newmarks |= mask;
4300 }
4301 }
4302 mask <<= 1;
4303
4304 // Iterate subsequent regions which fully lay inside [area_start, area_end[.
4305 region_start = region_end;
4306 region_end = region_start + Page::kRegionSize;
4307
4308 while (region_end <= area_end) {
4309 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004310 if (visit_dirty_region(this,
4311 region_start,
4312 region_end,
4313 copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004314 newmarks |= mask;
4315 }
4316 }
4317
4318 region_start = region_end;
4319 region_end = region_start + Page::kRegionSize;
4320
4321 mask <<= 1;
4322 }
4323
4324 if (region_start != area_end) {
4325 // A small piece of the area was left unvisited because area_end does not
4326 // coincide with a region end. Check whether the region covering the last
4327 // part of the area is dirty.
4328 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004329 if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004330 newmarks |= mask;
4331 }
4332 }
4333 }
4334
4335 return newmarks;
4336}
4337
4338
4339
4340void Heap::IterateDirtyRegions(
4341 PagedSpace* space,
4342 DirtyRegionCallback visit_dirty_region,
4343 ObjectSlotCallback copy_object_func,
4344 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004345
4346 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004347
Steve Blocka7e24c12009-10-30 11:49:00 +00004348 while (it.has_next()) {
4349 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004350 uint32_t marks = page->GetRegionMarks();
4351
4352 if (marks != Page::kAllRegionsCleanMarks) {
4353 Address start = page->ObjectAreaStart();
4354
4355 // Do not try to visit pointers beyond the page's allocation watermark;
4356 // the page can contain garbage pointers there.
4357 Address end;
4358
4359 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4360 page->IsWatermarkValid()) {
4361 end = page->AllocationWatermark();
4362 } else {
4363 end = page->CachedAllocationWatermark();
4364 }
4365
4366 ASSERT(space == old_pointer_space_ ||
4367 (space == map_space_ &&
4368 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4369
4370 page->SetRegionMarks(IterateDirtyRegions(marks,
4371 start,
4372 end,
4373 visit_dirty_region,
4374 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004375 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004376
4377 // Mark page watermark as invalid to maintain watermark validity invariant.
4378 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4379 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004380 }
4381}
4382
4383
Steve Blockd0582a62009-12-15 09:54:21 +00004384void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4385 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004386 IterateWeakRoots(v, mode);
4387}
4388
4389
4390void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004391 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004392 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004393 if (mode != VISIT_ALL_IN_SCAVENGE) {
4394 // Scavenge collections have special processing for this.
Steve Block44f0eee2011-05-26 01:26:41 +01004395 external_string_table_.Iterate(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004396 }
4397 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004398}
4399
4400
Steve Blockd0582a62009-12-15 09:54:21 +00004401void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004402 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004403 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004404
Iain Merrick75681382010-08-19 15:07:18 +01004405 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004406 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004407
Steve Block44f0eee2011-05-26 01:26:41 +01004408 isolate_->bootstrapper()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004409 v->Synchronize("bootstrapper");
Steve Block44f0eee2011-05-26 01:26:41 +01004410 isolate_->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004411 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004412 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004413 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004414
4415#ifdef ENABLE_DEBUGGER_SUPPORT
Steve Block44f0eee2011-05-26 01:26:41 +01004416 isolate_->debug()->Iterate(v);
Steve Blocka7e24c12009-10-30 11:49:00 +00004417#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004418 v->Synchronize("debug");
Steve Block44f0eee2011-05-26 01:26:41 +01004419 isolate_->compilation_cache()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004420 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004421
4422 // Iterate over local handles in handle scopes.
Steve Block44f0eee2011-05-26 01:26:41 +01004423 isolate_->handle_scope_implementer()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004424 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004425
Leon Clarkee46be812010-01-19 14:06:41 +00004426 // Iterate over the builtin code objects and code stubs in the
4427 // heap. Note that it is not necessary to iterate over code objects
4428 // on scavenge collections.
4429 if (mode != VISIT_ALL_IN_SCAVENGE) {
Steve Block44f0eee2011-05-26 01:26:41 +01004430 isolate_->builtins()->IterateBuiltins(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004431 }
Steve Blockd0582a62009-12-15 09:54:21 +00004432 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004433
4434 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004435 if (mode == VISIT_ONLY_STRONG) {
Steve Block44f0eee2011-05-26 01:26:41 +01004436 isolate_->global_handles()->IterateStrongRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004437 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01004438 isolate_->global_handles()->IterateAllRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004439 }
4440 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004441
4442 // Iterate over pointers being held by inactive threads.
Steve Block44f0eee2011-05-26 01:26:41 +01004443 isolate_->thread_manager()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004444 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004445
4446 // Iterate over the pointers the Serialization/Deserialization code is
4447 // holding.
4448 // During garbage collection this keeps the partial snapshot cache alive.
4449 // During deserialization of the startup snapshot this creates the partial
4450 // snapshot cache and deserializes the objects it refers to. During
4451 // serialization this does nothing, since the partial snapshot cache is
4452 // empty. However the next thing we do is create the partial snapshot,
4453 // filling up the partial snapshot cache with objects it needs as we go.
4454 SerializerDeserializer::Iterate(v);
4455 // We don't do a v->Synchronize call here, because in debug mode that will
4456 // output a flag to the snapshot. However at this point the serializer and
4457 // deserializer are deliberately a little unsynchronized (see above) so the
4458 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004459}
Steve Blocka7e24c12009-10-30 11:49:00 +00004460
4461
Steve Blocka7e24c12009-10-30 11:49:00 +00004462// TODO(1236194): Since the heap size is configurable on the command line
4463// and through the API, we should gracefully handle the case that the heap
4464// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004465bool Heap::ConfigureHeap(int max_semispace_size,
4466 int max_old_gen_size,
4467 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004468 if (HasBeenSetup()) return false;
4469
Steve Block3ce2e202009-11-05 08:53:23 +00004470 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4471
4472 if (Snapshot::IsEnabled()) {
4473 // If we are using a snapshot we always reserve the default amount
4474 // of memory for each semispace because code in the snapshot has
4475 // write-barrier code that relies on the size and alignment of new
4476 // space. We therefore cannot use a larger max semispace size
4477 // than the default reserved semispace size.
4478 if (max_semispace_size_ > reserved_semispace_size_) {
4479 max_semispace_size_ = reserved_semispace_size_;
4480 }
4481 } else {
4482 // If we are not using snapshots we reserve space for the actual
4483 // max semispace size.
4484 reserved_semispace_size_ = max_semispace_size_;
4485 }
4486
4487 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004488 if (max_executable_size > 0) {
4489 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4490 }
4491
4492 // The max executable size must be less than or equal to the max old
4493 // generation size.
4494 if (max_executable_size_ > max_old_generation_size_) {
4495 max_executable_size_ = max_old_generation_size_;
4496 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004497
4498 // The new space size must be a power of two to support single-bit testing
4499 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004500 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4501 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4502 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4503 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004504
4505 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004506 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004507
Steve Block44f0eee2011-05-26 01:26:41 +01004508 configured_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004509 return true;
4510}
4511
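// ConfigureHeap is normally reached through the public API rather than
// called directly. A minimal embedder-side sketch, assuming the v8.h
// ResourceConstraints interface of this era (names and units are the
// embedder's to verify against v8.h, not checked here):
//
//   v8::ResourceConstraints constraints;
//   constraints.set_max_young_space_size(2 * MB);
//   constraints.set_max_old_space_size(192 * MB);
//   constraints.set_max_executable_size(128 * MB);
//   v8::SetResourceConstraints(&constraints);  // forwards to ConfigureHeap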
4512
4513bool Heap::ConfigureHeapDefault() {
Russell Brenner90bac252010-11-18 13:33:46 -08004514 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4515 FLAG_max_old_space_size * MB,
4516 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004517}
4518
4519
Ben Murdochbb769b22010-08-11 14:56:33 +01004520void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004521 *stats->start_marker = HeapStats::kStartMarker;
4522 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004523 *stats->new_space_size = new_space_.SizeAsInt();
4524 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004525 *stats->old_pointer_space_size = old_pointer_space_->Size();
4526 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4527 *stats->old_data_space_size = old_data_space_->Size();
4528 *stats->old_data_space_capacity = old_data_space_->Capacity();
4529 *stats->code_space_size = code_space_->Size();
4530 *stats->code_space_capacity = code_space_->Capacity();
4531 *stats->map_space_size = map_space_->Size();
4532 *stats->map_space_capacity = map_space_->Capacity();
4533 *stats->cell_space_size = cell_space_->Size();
4534 *stats->cell_space_capacity = cell_space_->Capacity();
4535 *stats->lo_space_size = lo_space_->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01004536 isolate_->global_handles()->RecordStats(stats);
4537 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
Ben Murdochbb769b22010-08-11 14:56:33 +01004538 *stats->memory_allocator_capacity =
Steve Block44f0eee2011-05-26 01:26:41 +01004539 isolate()->memory_allocator()->Size() +
4540 isolate()->memory_allocator()->Available();
Iain Merrick75681382010-08-19 15:07:18 +01004541 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004543 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01004544 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01004545 for (HeapObject* obj = iterator.next();
4546 obj != NULL;
4547 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01004548 InstanceType type = obj->map()->instance_type();
4549 ASSERT(0 <= type && type <= LAST_TYPE);
4550 stats->objects_per_type[type]++;
4551 stats->size_per_type[type] += obj->Size();
4552 }
4553 }
Steve Blockd0582a62009-12-15 09:54:21 +00004554}
4555
4556
Ben Murdochf87a2032010-10-22 12:50:53 +01004557intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004558 return old_pointer_space_->Size()
4559 + old_data_space_->Size()
4560 + code_space_->Size()
4561 + map_space_->Size()
4562 + cell_space_->Size()
4563 + lo_space_->Size();
4564}
4565
4566
4567int Heap::PromotedExternalMemorySize() {
4568 if (amount_of_external_allocated_memory_
4569 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4570 return amount_of_external_allocated_memory_
4571 - amount_of_external_allocated_memory_at_last_global_gc_;
4572}
4573
Steve Block44f0eee2011-05-26 01:26:41 +01004574#ifdef DEBUG
4575
4576// Tags 0, 1, and 3 are used. Use 2 for marking a visited HeapObject.
4577static const int kMarkTag = 2;
4578
4579
4580class HeapDebugUtils {
4581 public:
4582 explicit HeapDebugUtils(Heap* heap)
4583 : search_for_any_global_(false),
4584 search_target_(NULL),
4585 found_target_(false),
4586 object_stack_(20),
4587 heap_(heap) {
4588 }
4589
4590 class MarkObjectVisitor : public ObjectVisitor {
4591 public:
4592 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4593
4594 void VisitPointers(Object** start, Object** end) {
4595 // Mark all HeapObject pointers in [start, end).
4596 for (Object** p = start; p < end; p++) {
4597 if ((*p)->IsHeapObject())
4598 utils_->MarkObjectRecursively(p);
4599 }
4600 }
4601
4602 HeapDebugUtils* utils_;
4603 };
4604
4605 void MarkObjectRecursively(Object** p) {
4606 if (!(*p)->IsHeapObject()) return;
4607
4608 HeapObject* obj = HeapObject::cast(*p);
4609
4610 Object* map = obj->map();
4611
4612 if (!map->IsHeapObject()) return; // visited before
4613
4614 if (found_target_) return; // stop if target found
4615 object_stack_.Add(obj);
4616 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
4617 (!search_for_any_global_ && (obj == search_target_))) {
4618 found_target_ = true;
4619 return;
4620 }
4621
4622 // not visited yet
4623 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
4624
4625 Address map_addr = map_p->address();
4626
4627 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
4628
4629 MarkObjectRecursively(&map);
4630
4631 MarkObjectVisitor mark_visitor(this);
4632
4633 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
4634 &mark_visitor);
4635
4636 if (!found_target_) // don't pop if found the target
4637 object_stack_.RemoveLast();
4638 }
4639
4640
4641 class UnmarkObjectVisitor : public ObjectVisitor {
4642 public:
4643 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4644
4645 void VisitPointers(Object** start, Object** end) {
4646 // Unmark all HeapObject pointers in [start, end).
4647 for (Object** p = start; p < end; p++) {
4648 if ((*p)->IsHeapObject())
4649 utils_->UnmarkObjectRecursively(p);
4650 }
4651 }
4652
4653 HeapDebugUtils* utils_;
4654 };
4655
4656
4657 void UnmarkObjectRecursively(Object** p) {
4658 if (!(*p)->IsHeapObject()) return;
4659
4660 HeapObject* obj = HeapObject::cast(*p);
4661
4662 Object* map = obj->map();
4663
4664 if (map->IsHeapObject()) return; // unmarked already
4665
4666 Address map_addr = reinterpret_cast<Address>(map);
4667
4668 map_addr -= kMarkTag;
4669
4670 ASSERT_TAG_ALIGNED(map_addr);
4671
4672 HeapObject* map_p = HeapObject::FromAddress(map_addr);
4673
4674 obj->set_map(reinterpret_cast<Map*>(map_p));
4675
4676 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
4677
4678 UnmarkObjectVisitor unmark_visitor(this);
4679
4680 obj->IterateBody(Map::cast(map_p)->instance_type(),
4681 obj->SizeFromMap(Map::cast(map_p)),
4682 &unmark_visitor);
4683 }
4684
4685
4686 void MarkRootObjectRecursively(Object** root) {
4687 if (search_for_any_global_) {
4688 ASSERT(search_target_ == NULL);
4689 } else {
4690 ASSERT(search_target_->IsHeapObject());
4691 }
4692 found_target_ = false;
4693 object_stack_.Clear();
4694
4695 MarkObjectRecursively(root);
4696 UnmarkObjectRecursively(root);
4697
4698 if (found_target_) {
4699 PrintF("=====================================\n");
4700 PrintF("==== Path to object ====\n");
4701 PrintF("=====================================\n\n");
4702
4703 ASSERT(!object_stack_.is_empty());
4704 for (int i = 0; i < object_stack_.length(); i++) {
4705 if (i > 0) PrintF("\n |\n |\n V\n\n");
4706 Object* obj = object_stack_[i];
4707 obj->Print();
4708 }
4709 PrintF("=====================================\n");
4710 }
4711 }
4712
4713 // Helper class for visiting HeapObjects recursively.
4714 class MarkRootVisitor: public ObjectVisitor {
4715 public:
4716 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4717
4718 void VisitPointers(Object** start, Object** end) {
4719 // Visit all HeapObject pointers in [start, end)
4720 for (Object** p = start; p < end; p++) {
4721 if ((*p)->IsHeapObject())
4722 utils_->MarkRootObjectRecursively(p);
4723 }
4724 }
4725
4726 HeapDebugUtils* utils_;
4727 };
4728
4729 bool search_for_any_global_;
4730 Object* search_target_;
4731 bool found_target_;
4732 List<Object*> object_stack_;
4733 Heap* heap_;
4734
4735 friend class Heap;
4736};
4737
4738#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00004739
4740bool Heap::Setup(bool create_heap_objects) {
Steve Block44f0eee2011-05-26 01:26:41 +01004741#ifdef DEBUG
4742 debug_utils_ = new HeapDebugUtils(this);
4743#endif
4744
Steve Blocka7e24c12009-10-30 11:49:00 +00004745 // Initialize heap spaces and initial maps and objects. Whenever something
4746 // goes wrong, just return false. The caller should check the results and
4747 // call Heap::TearDown() to release allocated memory.
4748 //
4749 // If the heap is not yet configured (e.g., through the API), configure it.
4750 // Configuration is based on the flags new-space-size (really the semispace
4751 // size) and old-space-size if set or the initial values of semispace_size_
4752 // and old_generation_size_ otherwise.
Steve Block44f0eee2011-05-26 01:26:41 +01004753 if (!configured_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004754 if (!ConfigureHeapDefault()) return false;
4755 }
4756
Steve Block44f0eee2011-05-26 01:26:41 +01004757 gc_initializer_mutex->Lock();
4758 static bool initialized_gc = false;
4759 if (!initialized_gc) {
4760 initialized_gc = true;
4761 ScavengingVisitor::Initialize();
4762 NewSpaceScavenger::Initialize();
4763 MarkCompactCollector::Initialize();
4764 }
4765 gc_initializer_mutex->Unlock();
Iain Merrick75681382010-08-19 15:07:18 +01004766
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004767 MarkMapPointersAsEncoded(false);
4768
Steve Blocka7e24c12009-10-30 11:49:00 +00004769 // Set up the memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004770 // space. The chunk is twice the requested reserved new space size to
4771 // ensure that we can find a pair of semispaces that are contiguous
4772 // and aligned to their size.
Steve Block44f0eee2011-05-26 01:26:41 +01004773 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
4774 return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004775 void* chunk =
Steve Block44f0eee2011-05-26 01:26:41 +01004776 isolate_->memory_allocator()->ReserveInitialChunk(
4777 4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004778 if (chunk == NULL) return false;
4779
4780 // Align the pair of semispaces to their size, which must be a power
4781 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004782 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004783 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4784 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4785 return false;
4786 }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_POINTER_SPACE,
                   NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->Setup(NULL, 0)) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_DATA_SPACE,
                   NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->Setup(NULL, 0)) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!isolate_->code_range()->Setup(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->Setup(NULL, 0)) return false;

  // Initialize map space.
  map_space_ = new MapSpace(this, FLAG_use_big_map_space
      ? max_old_generation_size_
      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
      FLAG_max_map_space_pages,
      MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->Setup(NULL, 0)) return false;

  // Initialize global property cell space.
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->Setup(NULL, 0)) return false;

  // The large object space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->Setup()) return false;

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects.
    if (!CreateInitialObjects()) return false;

    global_contexts_list_ = undefined_value();
  }

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

#ifdef ENABLE_LOGGING_AND_PROFILING
  // This should be called only after initial objects have been created.
  isolate_->producer_heap_profile()->Setup();
#endif

  return true;
}

void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out-of-range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore them.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
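
// Illustrative bit-level view of the tagging above (hypothetical 32-bit
// address; in V8, kSmiTag == 0 and kSmiTagMask == 1):
//   jslimit()           0x0804f3a7
//   & ~kSmiTagMask   -> 0x0804f3a6   (low tag bit cleared)
//   | kSmiTag        -> 0x0804f3a6   (tagged as a Smi)
// The stored value still approximates the stack limit, but the GC sees a Smi
// and never tries to follow it as a heap pointer.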


void Heap::TearDown() {
  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("mark_compact_count=%d ", mc_count_);
    PrintF("max_gc_pause=%d ", get_max_gc_pause());
    PrintF("min_in_mutator=%d ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("\n\n");
  }

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  isolate_->memory_allocator()->TearDown();

#ifdef DEBUG
  delete debug_utils_;
  debug_utils_ = NULL;
#endif
}


void Heap::Shrink() {
  // Try to shrink all paged spaces.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->Shrink();
}


#ifdef ENABLE_HEAP_PROTECTION

void Heap::Protect() {
  if (HasBeenSetup()) {
    AllSpaces spaces;
    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
      space->Protect();
  }
}


void Heap::Unprotect() {
  if (HasBeenSetup()) {
    AllSpaces spaces;
    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
      space->Unprotect();
  }
}

#endif

void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}
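
// Usage sketch (hypothetical embedder code; GCType, GCCallbackFlags and the
// callback typedefs come from the public v8.h API):
//   static void OnFullGC(GCType type, GCCallbackFlags flags) { /* ... */ }
//   heap->AddGCPrologueCallback(OnFullGC, kGCTypeMarkSweepCompact);
//   ...
//   heap->RemoveGCPrologueCallback(OnFullGC);
// Adding the same (callback, gc_type) pair twice asserts in debug builds, and
// removing a callback that was never added hits UNREACHABLE().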


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif


Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return HEAP->new_space();
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    case LO_SPACE:
      return HEAP->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator()
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}
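
// Driving pattern for SpaceIterator (see HeapIterator::NextObject below):
// drain each ObjectIterator returned by next(), advancing with next() while
// has_next() reports more spaces.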


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class FreeListNodesFilter : public HeapObjectsFilter {
 public:
  FreeListNodesFilter() {
    MarkFreeListNodes();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  void MarkFreeListNodes() {
    Heap* heap = HEAP;
    heap->old_pointer_space()->MarkFreeListNodes();
    heap->old_data_space()->MarkFreeListNodes();
    MarkCodeSpaceFreeListNodes(heap);
    heap->map_space()->MarkFreeListNodes();
    heap->cell_space()->MarkFreeListNodes();
  }

  void MarkCodeSpaceFreeListNodes(Heap* heap) {
    // For code space, using FreeListNode::IsFreeListNode is OK.
    HeapObjectIterator iter(heap->code_space());
    for (HeapObject* obj = iter.next_object();
         obj != NULL;
         obj = iter.next_object()) {
      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
    }
  }

  AssertNoAllocation no_alloc;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkUnreachableObjects();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  class UnmarkingVisitor : public ObjectVisitor {
   public:
    UnmarkingVisitor() : list_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        if (obj->IsMarked()) {
          obj->ClearMark();
          list_.Add(obj);
        }
      }
    }

    bool can_process() { return !list_.is_empty(); }

    void ProcessNext() {
      HeapObject* obj = list_.RemoveLast();
      obj->Iterate(this);
    }

   private:
    List<HeapObject*> list_;
  };

  void MarkUnreachableObjects() {
    HeapIterator iterator;
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      obj->SetMark();
    }
    UnmarkingVisitor visitor;
    HEAP->IterateRoots(&visitor, VISIT_ALL);
    while (visitor.can_process())
      visitor.ProcessNext();
  }

  AssertNoAllocation no_alloc;
};
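
// The filter above inverts the usual marking scheme: first every object in
// the heap is marked, then everything reachable from the roots is unmarked,
// so the objects still carrying a mark when SkipObject() runs are exactly
// the unreachable ones.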


HeapIterator::HeapIterator()
    : filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
    : filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
  switch (filtering_) {
    case kFilterFreeListNodes:
      filter_ = new FreeListNodesFilter;
      break;
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, the heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}
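
// Usage sketch (the filtering modes temporarily flip mark bits, so the loop
// should always run to completion; see the assert in Shutdown()):
//   HeapIterator it(HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // Examine obj.
//   }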


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)

Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);

class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  object_stack_.Clear();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_global_context = obj->IsGlobalContext();

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Contexts properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}
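
// Marking trick used above: a visited object's map slot temporarily holds
// map_address + kMarkTag, which is not a properly tagged HeapObject pointer,
// so the "!map->IsHeapObject()" check doubles as the visited test.
// UnmarkRecursively() below reverses the transformation.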


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====  Path to object  ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack_[i];
#ifdef OBJECT_PRINT
      obj->Print();
#else
      obj->ShortPrint();
#endif
    }
    PrintF("=====================================\n");
  }
}
#endif  // DEBUG || LIVE_OBJECT_LIST


#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif


static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}
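
// "Holes" are bytes an old space cannot hand out linearly: wasted remainder
// bytes plus free-list bytes, summed over the spaces that OldSpaces yields
// (old pointer space, old data space and code space).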


GCTracer::GCTracer(Heap* heap)
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      heap_(heap) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
  previous_marked_count_ =
      heap_->mark_compact_collector_.previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = heap_->SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }
}


GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("%s",
               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);

    PrintF("\n");
  }

#if defined(ENABLE_LOGGING_AND_PROFILING)
  heap_->PrintShortHeapStatistics();
#endif
}
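
// Example of the one-line trace emitted above (values illustrative):
//   Scavenge 7.2 -> 4.1 MB, 3 ms.
// With --trace-gc-nvp the same information is printed as name=value pairs
// instead, e.g. "pause=3 mutator=42 gc=s external=0 ... promoted=262144".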


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
                                                           : "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}
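
// The shift discards the map pointer's low bits, which are the same for all
// maps because of heap-object alignment; xor-ing with the string's hash then
// mixes map identity and name identity before masking with kCapacityMask.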


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (HEAP->LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Type t)
    : type_(t),
      isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}
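
// Since the FPU never produces this input bit pattern for a computed
// argument, an untouched element can never compare equal to a real lookup
// key, so no separate "slot is empty" flag is needed.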


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}
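
// Post-conditions of CleanUp(): entries the GC cleared to the null sentinel
// are dropped, strings still in new space stay in new_space_strings_, and
// survivors that were promoted move to old_space_strings_.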


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


} }  // namespace v8::internal