// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;
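// These minima seed old_gen_promotion_limit_ and old_gen_allocation_limit_ in
// the Heap constructor below; after every mark-compact collection the limits
// are recomputed from the surviving old-generation size in
// PerformGarbageCollection.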


static Mutex* gc_initializer_mutex = OS::CreateMutex();


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
      reserved_semispace_size_(2*MB),
      max_semispace_size_(2*MB),
      initial_semispace_size_(128*KB),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
      reserved_semispace_size_(16*MB),
      max_semispace_size_(16*MB),
      initial_semispace_size_(1*MB),
      max_old_generation_size_(1*GB),
      max_executable_size_(256*MB),
      code_range_size_(512*MB),
#else
      reserved_semispace_size_(8*MB),
      max_semispace_size_(8*MB),
      initial_semispace_size_(512*KB),
      max_old_generation_size_(512*MB),
      max_executable_size_(128*MB),
      code_range_size_(0),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      mc_count_(0),
      ms_count_(0),
      gc_count_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      configured_(false),
      is_safe_to_read_maps_(true) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers,       used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space,     used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif

  LiveObjectList::GCPrologue();
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  mark_compact_collector_.SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector()->SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC. Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no more weak callback invocations.
  // Therefore stop recollecting after several attempts.
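  // (CollectGarbage returns whether a subsequent collection is likely to free
  // more memory; the value comes from the global handles'
  // PostGarbageCollectionProcessing in PerformGarbageCollection, and it is
  // what the loop below checks.)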
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  mark_compact_collector()->SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this);
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation. This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}
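// ReserveSpace keeps collecting the space whose reservation failed until every
// requested size fits, so it suits callers that must complete a burst of
// allocations without an allocation failure in the middle (for example, the
// snapshot deserializer).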


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}



void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
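    // Illustrative numbers: with a 30 MB old generation the new limits are
    // 30 + Max(2, 10) = 40 MB for promotion and 30 + Max(8, 15) = 45 MB for
    // allocation; the branch below doubles both when survival rates stay high
    // across both scavenges and full collections.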

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  bool is_compacting = mark_compact_collector_.IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  is_safe_to_read_maps_ = false;
  mark_compact_collector_.CollectGarbage();
  is_safe_to_read_maps_ = true;

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(HEAP->old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  SwitchScavengingVisitorsTableIfProfilingWasEnabled();

  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
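  // (Both queues are drained by DoScavenge, below.)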
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue_.Initialize(new_space_.ToSpaceHigh());

  is_safe_to_read_maps_ = false;
  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation. By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &Heap::IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();
  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  is_safe_to_read_maps_ = true;

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.Verify();

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = heap->undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != heap->undefined_value()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == heap->undefined_value()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(heap->undefined_value());
  }

  return head;
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (candidate != undefined_value()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined_value()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue_.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue_.remove(&target, &size);

      // The promoted object might already have been partially visited during
      // dirty regions iteration. Thus we search specifically for pointers to
      // the from semispace instead of looking for pointers to new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}


enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


typedef void (*ScavengingCallback)(Map* map,
                                   HeapObject** slot,
                                   HeapObject* object);


static Atomic32 scavenging_visitors_table_mode_;
static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;


INLINE(static void DoScavengeObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* obj));


void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}
1260
1261
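// ScavengingVisitor provides the per-map-type evacuation callbacks that
// DoScavengeObject dispatches through: Initialize() registers one
// ScavengingCallback per visitor id (sequential strings, byte arrays, fixed
// arrays, shortcut candidates, plus the generic data/pointer/struct
// specializations), and the resulting table is copied into the shared
// scavenging_visitors_table_. The class is instantiated twice, once with
// logging/profiling support and once without, so in effect the scavenger only
// pays for move-event bookkeeping when it is actually needed.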
1262template<LoggingAndProfiling logging_and_profiling_mode>
Iain Merrick75681382010-08-19 15:07:18 +01001263class ScavengingVisitor : public StaticVisitorBase {
1264 public:
1265 static void Initialize() {
1266 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1267 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1268 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1269 table_.Register(kVisitByteArray, &EvacuateByteArray);
1270 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
Ben Murdoch8b112d22011-06-08 16:22:53 +01001271
Ben Murdochf87a2032010-10-22 12:50:53 +01001272 table_.Register(kVisitGlobalContext,
1273 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001274 template VisitSpecialized<Context::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001275
1276 table_.Register(kVisitConsString,
1277 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001278 template VisitSpecialized<ConsString::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001279
1280 table_.Register(kVisitSharedFunctionInfo,
1281 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001282 template VisitSpecialized<SharedFunctionInfo::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001283
1284 table_.Register(kVisitJSFunction,
1285 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001286 template VisitSpecialized<JSFunction::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001287
1288 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1289 kVisitDataObject,
1290 kVisitDataObjectGeneric>();
1291
1292 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1293 kVisitJSObject,
1294 kVisitJSObjectGeneric>();
1295
1296 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1297 kVisitStruct,
1298 kVisitStructGeneric>();
1299 }
1300
Ben Murdoch8b112d22011-06-08 16:22:53 +01001301 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1302 return &table_;
Iain Merrick75681382010-08-19 15:07:18 +01001303 }
1304
Iain Merrick75681382010-08-19 15:07:18 +01001305 private:
1306 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1307 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1308
Steve Blocka7e24c12009-10-30 11:49:00 +00001309#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Steve Block44f0eee2011-05-26 01:26:41 +01001310 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
Iain Merrick75681382010-08-19 15:07:18 +01001311 bool should_record = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001312#ifdef DEBUG
Iain Merrick75681382010-08-19 15:07:18 +01001313 should_record = FLAG_heap_stats;
Steve Blocka7e24c12009-10-30 11:49:00 +00001314#endif
1315#ifdef ENABLE_LOGGING_AND_PROFILING
Iain Merrick75681382010-08-19 15:07:18 +01001316 should_record = should_record || FLAG_log_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00001317#endif
Iain Merrick75681382010-08-19 15:07:18 +01001318 if (should_record) {
Steve Block44f0eee2011-05-26 01:26:41 +01001319 if (heap->new_space()->Contains(obj)) {
1320 heap->new_space()->RecordAllocation(obj);
Iain Merrick75681382010-08-19 15:07:18 +01001321 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001322 heap->new_space()->RecordPromotion(obj);
Iain Merrick75681382010-08-19 15:07:18 +01001323 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001324 }
1325 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001326#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1327
Iain Merrick75681382010-08-19 15:07:18 +01001328 // Helper function used by the evacuation routines to copy a source object to an
1329 // allocated target object and update the forwarding pointer in the source
1330 // object. Returns the target object.
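  // Once the forwarding address is installed in the source's map word, any
  // slot that still references the old copy can be redirected to the new
  // location on a later visit (see, for example, the IsForwardingAddress()
  // check in EvacuateShortcutCandidate below).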
Steve Block44f0eee2011-05-26 01:26:41 +01001331 INLINE(static HeapObject* MigrateObject(Heap* heap,
1332 HeapObject* source,
Iain Merrick75681382010-08-19 15:07:18 +01001333 HeapObject* target,
1334 int size)) {
1335 // Copy the content of source to target.
Steve Block44f0eee2011-05-26 01:26:41 +01001336 heap->CopyBlock(target->address(), source->address(), size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001337
Iain Merrick75681382010-08-19 15:07:18 +01001338 // Set the forwarding address.
1339 source->set_map_word(MapWord::FromForwardingAddress(target));
Steve Blocka7e24c12009-10-30 11:49:00 +00001340
Ben Murdoch8b112d22011-06-08 16:22:53 +01001341 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001342#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Ben Murdoch8b112d22011-06-08 16:22:53 +01001343 // Update NewSpace stats if necessary.
1344 RecordCopiedObject(heap, target);
Steve Blocka7e24c12009-10-30 11:49:00 +00001345#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001346 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001347#if defined(ENABLE_LOGGING_AND_PROFILING)
Ben Murdoch8b112d22011-06-08 16:22:53 +01001348 Isolate* isolate = heap->isolate();
1349 if (isolate->logger()->is_logging() ||
1350 isolate->cpu_profiler()->is_profiling()) {
1351 if (target->IsSharedFunctionInfo()) {
1352 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1353 source->address(), target->address()));
1354 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001355 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001356#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001357 }
1358
Iain Merrick75681382010-08-19 15:07:18 +01001359 return target;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001360 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001361
1362
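  // EvacuateObject implements the promotion policy shared by the Evacuate*
  // helpers below: if ShouldBePromoted() selects the object, it is allocated
  // in old space -- the large object space when it exceeds
  // Page::kMaxHeapObjectSize, old data space for pointer-free objects, and
  // old pointer space otherwise, in which case it is also queued for a later
  // pointer re-scan. If promotion is not selected (or the old-space
  // allocation fails), the object is copied within new space instead.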
Iain Merrick75681382010-08-19 15:07:18 +01001363 template<ObjectContents object_contents, SizeRestriction size_restriction>
1364 static inline void EvacuateObject(Map* map,
1365 HeapObject** slot,
1366 HeapObject* object,
1367 int object_size) {
1368 ASSERT((size_restriction != SMALL) ||
1369 (object_size <= Page::kMaxHeapObjectSize));
1370 ASSERT(object->Size() == object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001371
Steve Block44f0eee2011-05-26 01:26:41 +01001372 Heap* heap = map->heap();
1373 if (heap->ShouldBePromoted(object->address(), object_size)) {
John Reck59135872010-11-02 12:39:01 -07001374 MaybeObject* maybe_result;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001375
Iain Merrick75681382010-08-19 15:07:18 +01001376 if ((size_restriction != SMALL) &&
1377 (object_size > Page::kMaxHeapObjectSize)) {
Steve Block44f0eee2011-05-26 01:26:41 +01001378 maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001379 } else {
Iain Merrick75681382010-08-19 15:07:18 +01001380 if (object_contents == DATA_OBJECT) {
Steve Block44f0eee2011-05-26 01:26:41 +01001381 maybe_result = heap->old_data_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001382 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001383 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001384 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001385 }
1386
John Reck59135872010-11-02 12:39:01 -07001387 Object* result = NULL; // Initialization to please compiler.
1388 if (maybe_result->ToObject(&result)) {
Iain Merrick75681382010-08-19 15:07:18 +01001389 HeapObject* target = HeapObject::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01001390 *slot = MigrateObject(heap, object, target, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001391
Iain Merrick75681382010-08-19 15:07:18 +01001392 if (object_contents == POINTER_OBJECT) {
Steve Block44f0eee2011-05-26 01:26:41 +01001393 heap->promotion_queue()->insert(target, object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001394 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001395
Steve Block44f0eee2011-05-26 01:26:41 +01001396 heap->tracer()->increment_promoted_objects_size(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001397 return;
1398 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001399 }
John Reck59135872010-11-02 12:39:01 -07001400 Object* result =
Steve Block44f0eee2011-05-26 01:26:41 +01001401 heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
1402 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001403 return;
1404 }
1405
Iain Merrick75681382010-08-19 15:07:18 +01001406
1407 static inline void EvacuateFixedArray(Map* map,
1408 HeapObject** slot,
1409 HeapObject* object) {
1410 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1411 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1412 slot,
1413 object,
1414 object_size);
1415 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001416
1417
Iain Merrick75681382010-08-19 15:07:18 +01001418 static inline void EvacuateByteArray(Map* map,
1419 HeapObject** slot,
1420 HeapObject* object) {
1421 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1422 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1423 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001424
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001425
Iain Merrick75681382010-08-19 15:07:18 +01001426 static inline void EvacuateSeqAsciiString(Map* map,
1427 HeapObject** slot,
1428 HeapObject* object) {
1429 int object_size = SeqAsciiString::cast(object)->
1430 SeqAsciiStringSize(map->instance_type());
1431 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1432 }
1433
1434
1435 static inline void EvacuateSeqTwoByteString(Map* map,
1436 HeapObject** slot,
1437 HeapObject* object) {
1438 int object_size = SeqTwoByteString::cast(object)->
1439 SeqTwoByteStringSize(map->instance_type());
1440 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1441 }
1442
1443
1444 static inline bool IsShortcutCandidate(int type) {
1445 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1446 }
1447
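  // A shortcut candidate is a cons string whose second part may be the empty
  // string; such a string can be "unwrapped" during the scavenge. For
  // example, a cons of ("foo", "") is scavenged as just "foo": the slot is
  // redirected to the first part, and the cons cell itself gets a forwarding
  // pointer to that part, so later references collapse to it as well. If the
  // second part is non-empty, the cons string is evacuated normally.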
1448 static inline void EvacuateShortcutCandidate(Map* map,
1449 HeapObject** slot,
1450 HeapObject* object) {
1451 ASSERT(IsShortcutCandidate(map->instance_type()));
1452
Steve Block44f0eee2011-05-26 01:26:41 +01001453 if (ConsString::cast(object)->unchecked_second() ==
1454 map->heap()->empty_string()) {
Iain Merrick75681382010-08-19 15:07:18 +01001455 HeapObject* first =
1456 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1457
1458 *slot = first;
1459
Steve Block44f0eee2011-05-26 01:26:41 +01001460 if (!map->heap()->InNewSpace(first)) {
Iain Merrick75681382010-08-19 15:07:18 +01001461 object->set_map_word(MapWord::FromForwardingAddress(first));
1462 return;
1463 }
1464
1465 MapWord first_word = first->map_word();
1466 if (first_word.IsForwardingAddress()) {
1467 HeapObject* target = first_word.ToForwardingAddress();
1468
1469 *slot = target;
1470 object->set_map_word(MapWord::FromForwardingAddress(target));
1471 return;
1472 }
1473
Ben Murdoch8b112d22011-06-08 16:22:53 +01001474 DoScavengeObject(first->map(), slot, first);
Iain Merrick75681382010-08-19 15:07:18 +01001475 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1476 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001477 }
Iain Merrick75681382010-08-19 15:07:18 +01001478
1479 int object_size = ConsString::kSize;
1480 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001481 }
1482
Iain Merrick75681382010-08-19 15:07:18 +01001483 template<ObjectContents object_contents>
1484 class ObjectEvacuationStrategy {
1485 public:
1486 template<int object_size>
1487 static inline void VisitSpecialized(Map* map,
1488 HeapObject** slot,
1489 HeapObject* object) {
1490 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1491 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001492
Iain Merrick75681382010-08-19 15:07:18 +01001493 static inline void Visit(Map* map,
1494 HeapObject** slot,
1495 HeapObject* object) {
1496 int object_size = map->instance_size();
1497 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1498 }
1499 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001500
Ben Murdoch8b112d22011-06-08 16:22:53 +01001501 static VisitorDispatchTable<ScavengingCallback> table_;
Iain Merrick75681382010-08-19 15:07:18 +01001502};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001503
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001504
Ben Murdoch8b112d22011-06-08 16:22:53 +01001505template<LoggingAndProfiling logging_and_profiling_mode>
1506VisitorDispatchTable<ScavengingCallback>
1507 ScavengingVisitor<logging_and_profiling_mode>::table_;
1508
1509
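// Both flavours of the visitor table are built eagerly; the shared
// scavenging_visitors_table_ starts out as a copy of the
// LOGGING_AND_PROFILING_DISABLED variant and is switched over at most once,
// guarded by the Atomic32 mode flag, when an isolate notices that logging or
// profiling has been turned on -- see
// SwitchScavengingVisitorsTableIfProfilingWasEnabled below.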
1510static void InitializeScavengingVisitorsTables() {
1511 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
1512 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
1513 scavenging_visitors_table_.CopyFrom(
1514 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
1515 scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
1516}
1517
1518
1519void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
1520 if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
1521 // Table was already updated by some isolate.
1522 return;
1523 }
1524
1525 if (isolate()->logger()->is_logging() ||
1526 isolate()->cpu_profiler()->is_profiling() ||
1527 (isolate()->heap_profiler() != NULL &&
1528 isolate()->heap_profiler()->is_profiling())) {
1529 // If one of the isolates is scavenging at this moment, it might see
1530 // this table in an inconsistent state, with some of the callbacks
1531 // pointing to
1532 // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
1533 // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
1534 // However, this does not lead to any bugs because such an isolate does
1535 // not have profiling enabled, and any isolate with profiling enabled is
1536 // guaranteed to see the table in a consistent state.
1537 scavenging_visitors_table_.CopyFrom(
1538 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
1539
1540 // We use Release_Store to prevent reordering of this write before writes
1541 // to the table.
1542 Release_Store(&scavenging_visitors_table_mode_,
1543 LOGGING_AND_PROFILING_ENABLED);
1544 }
1545}
Steve Blocka7e24c12009-10-30 11:49:00 +00001546
1547
1548void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
Steve Block44f0eee2011-05-26 01:26:41 +01001549 ASSERT(HEAP->InFromSpace(object));
Steve Blocka7e24c12009-10-30 11:49:00 +00001550 MapWord first_word = object->map_word();
1551 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001552 Map* map = first_word.ToMap();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001553 DoScavengeObject(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001554}
1555
1556
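// AllocatePartialMap exists for bootstrapping the heap: the meta map must be
// created before ordinary maps can be, so this version fills in just enough
// fields for the map to be traversable. The remaining fields (instance
// descriptors, code cache, prototype, constructor) are patched up later in
// CreateInitialMaps once the empty fixed array and the empty descriptor
// array exist.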
John Reck59135872010-11-02 12:39:01 -07001557MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1558 int instance_size) {
1559 Object* result;
1560 { MaybeObject* maybe_result = AllocateRawMap();
1561 if (!maybe_result->ToObject(&result)) return maybe_result;
1562 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001563
1564 // Map::cast cannot be used due to uninitialized map field.
1565 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1566 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1567 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Steve Block44f0eee2011-05-26 01:26:41 +01001568 reinterpret_cast<Map*>(result)->set_visitor_id(
1569 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001570 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001571 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001572 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001573 reinterpret_cast<Map*>(result)->set_bit_field(0);
1574 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001575 return result;
1576}
1577
1578
John Reck59135872010-11-02 12:39:01 -07001579MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1580 Object* result;
1581 { MaybeObject* maybe_result = AllocateRawMap();
1582 if (!maybe_result->ToObject(&result)) return maybe_result;
1583 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001584
1585 Map* map = reinterpret_cast<Map*>(result);
1586 map->set_map(meta_map());
1587 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001588 map->set_visitor_id(
1589 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001590 map->set_prototype(null_value());
1591 map->set_constructor(null_value());
1592 map->set_instance_size(instance_size);
1593 map->set_inobject_properties(0);
1594 map->set_pre_allocated_property_fields(0);
1595 map->set_instance_descriptors(empty_descriptor_array());
1596 map->set_code_cache(empty_fixed_array());
1597 map->set_unused_property_fields(0);
1598 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001599 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001600
1601 // If the map object is aligned, fill the padding area with Smi 0 objects.
1602 if (Map::kPadStart < Map::kSize) {
1603 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1604 0,
1605 Map::kSize - Map::kPadStart);
1606 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001607 return map;
1608}
1609
1610
John Reck59135872010-11-02 12:39:01 -07001611MaybeObject* Heap::AllocateCodeCache() {
1612 Object* result;
1613 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1614 if (!maybe_result->ToObject(&result)) return maybe_result;
1615 }
Steve Block6ded16b2010-05-10 14:33:55 +01001616 CodeCache* code_cache = CodeCache::cast(result);
1617 code_cache->set_default_cache(empty_fixed_array());
1618 code_cache->set_normal_type_cache(undefined_value());
1619 return code_cache;
1620}
1621
1622
Steve Blocka7e24c12009-10-30 11:49:00 +00001623const Heap::StringTypeTable Heap::string_type_table[] = {
1624#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1625 {type, size, k##camel_name##MapRootIndex},
1626 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1627#undef STRING_TYPE_ELEMENT
1628};
1629
1630
1631const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1632#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1633 {contents, k##name##RootIndex},
1634 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1635#undef CONSTANT_SYMBOL_ELEMENT
1636};
1637
1638
1639const Heap::StructTable Heap::struct_table[] = {
1640#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1641 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1642 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1643#undef STRUCT_TABLE_ELEMENT
1644};
1645
1646
1647bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001648 Object* obj;
1649 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1650 if (!maybe_obj->ToObject(&obj)) return false;
1651 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001652 // Map::cast cannot be used due to uninitialized map field.
1653 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1654 set_meta_map(new_meta_map);
1655 new_meta_map->set_map(new_meta_map);
1656
John Reck59135872010-11-02 12:39:01 -07001657 { MaybeObject* maybe_obj =
1658 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1659 if (!maybe_obj->ToObject(&obj)) return false;
1660 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001661 set_fixed_array_map(Map::cast(obj));
1662
John Reck59135872010-11-02 12:39:01 -07001663 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1664 if (!maybe_obj->ToObject(&obj)) return false;
1665 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001666 set_oddball_map(Map::cast(obj));
1667
Steve Block6ded16b2010-05-10 14:33:55 +01001668 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001669 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1670 if (!maybe_obj->ToObject(&obj)) return false;
1671 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001672 set_empty_fixed_array(FixedArray::cast(obj));
1673
John Reck59135872010-11-02 12:39:01 -07001674 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1675 if (!maybe_obj->ToObject(&obj)) return false;
1676 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001677 set_null_value(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01001678 Oddball::cast(obj)->set_kind(Oddball::kNull);
Steve Blocka7e24c12009-10-30 11:49:00 +00001679
1680 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001681 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1682 if (!maybe_obj->ToObject(&obj)) return false;
1683 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001684 set_empty_descriptor_array(DescriptorArray::cast(obj));
1685
1686 // Fix the instance_descriptors for the existing maps.
1687 meta_map()->set_instance_descriptors(empty_descriptor_array());
1688 meta_map()->set_code_cache(empty_fixed_array());
1689
1690 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1691 fixed_array_map()->set_code_cache(empty_fixed_array());
1692
1693 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1694 oddball_map()->set_code_cache(empty_fixed_array());
1695
1696 // Fix prototype object for existing maps.
1697 meta_map()->set_prototype(null_value());
1698 meta_map()->set_constructor(null_value());
1699
1700 fixed_array_map()->set_prototype(null_value());
1701 fixed_array_map()->set_constructor(null_value());
1702
1703 oddball_map()->set_prototype(null_value());
1704 oddball_map()->set_constructor(null_value());
1705
John Reck59135872010-11-02 12:39:01 -07001706 { MaybeObject* maybe_obj =
1707 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1708 if (!maybe_obj->ToObject(&obj)) return false;
1709 }
Iain Merrick75681382010-08-19 15:07:18 +01001710 set_fixed_cow_array_map(Map::cast(obj));
1711 ASSERT(fixed_array_map() != fixed_cow_array_map());
1712
John Reck59135872010-11-02 12:39:01 -07001713 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1714 if (!maybe_obj->ToObject(&obj)) return false;
1715 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001716 set_heap_number_map(Map::cast(obj));
1717
John Reck59135872010-11-02 12:39:01 -07001718 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1719 if (!maybe_obj->ToObject(&obj)) return false;
1720 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001721 set_proxy_map(Map::cast(obj));
1722
1723 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1724 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001725 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1726 if (!maybe_obj->ToObject(&obj)) return false;
1727 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001728 roots_[entry.index] = Map::cast(obj);
1729 }
1730
John Reck59135872010-11-02 12:39:01 -07001731 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1732 if (!maybe_obj->ToObject(&obj)) return false;
1733 }
Steve Blockd0582a62009-12-15 09:54:21 +00001734 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001735 Map::cast(obj)->set_is_undetectable();
1736
John Reck59135872010-11-02 12:39:01 -07001737 { MaybeObject* maybe_obj =
1738 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1739 if (!maybe_obj->ToObject(&obj)) return false;
1740 }
Steve Blockd0582a62009-12-15 09:54:21 +00001741 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001742 Map::cast(obj)->set_is_undetectable();
1743
John Reck59135872010-11-02 12:39:01 -07001744 { MaybeObject* maybe_obj =
1745 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1746 if (!maybe_obj->ToObject(&obj)) return false;
1747 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001748 set_byte_array_map(Map::cast(obj));
1749
Ben Murdochb0fe1622011-05-05 13:52:32 +01001750 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1751 if (!maybe_obj->ToObject(&obj)) return false;
1752 }
1753 set_empty_byte_array(ByteArray::cast(obj));
1754
John Reck59135872010-11-02 12:39:01 -07001755 { MaybeObject* maybe_obj =
Steve Block44f0eee2011-05-26 01:26:41 +01001756 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
John Reck59135872010-11-02 12:39:01 -07001757 if (!maybe_obj->ToObject(&obj)) return false;
1758 }
Steve Block44f0eee2011-05-26 01:26:41 +01001759 set_external_pixel_array_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001760
John Reck59135872010-11-02 12:39:01 -07001761 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1762 ExternalArray::kAlignedSize);
1763 if (!maybe_obj->ToObject(&obj)) return false;
1764 }
Steve Block3ce2e202009-11-05 08:53:23 +00001765 set_external_byte_array_map(Map::cast(obj));
1766
John Reck59135872010-11-02 12:39:01 -07001767 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1768 ExternalArray::kAlignedSize);
1769 if (!maybe_obj->ToObject(&obj)) return false;
1770 }
Steve Block3ce2e202009-11-05 08:53:23 +00001771 set_external_unsigned_byte_array_map(Map::cast(obj));
1772
John Reck59135872010-11-02 12:39:01 -07001773 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1774 ExternalArray::kAlignedSize);
1775 if (!maybe_obj->ToObject(&obj)) return false;
1776 }
Steve Block3ce2e202009-11-05 08:53:23 +00001777 set_external_short_array_map(Map::cast(obj));
1778
John Reck59135872010-11-02 12:39:01 -07001779 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1780 ExternalArray::kAlignedSize);
1781 if (!maybe_obj->ToObject(&obj)) return false;
1782 }
Steve Block3ce2e202009-11-05 08:53:23 +00001783 set_external_unsigned_short_array_map(Map::cast(obj));
1784
John Reck59135872010-11-02 12:39:01 -07001785 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1786 ExternalArray::kAlignedSize);
1787 if (!maybe_obj->ToObject(&obj)) return false;
1788 }
Steve Block3ce2e202009-11-05 08:53:23 +00001789 set_external_int_array_map(Map::cast(obj));
1790
John Reck59135872010-11-02 12:39:01 -07001791 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1792 ExternalArray::kAlignedSize);
1793 if (!maybe_obj->ToObject(&obj)) return false;
1794 }
Steve Block3ce2e202009-11-05 08:53:23 +00001795 set_external_unsigned_int_array_map(Map::cast(obj));
1796
John Reck59135872010-11-02 12:39:01 -07001797 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1798 ExternalArray::kAlignedSize);
1799 if (!maybe_obj->ToObject(&obj)) return false;
1800 }
Steve Block3ce2e202009-11-05 08:53:23 +00001801 set_external_float_array_map(Map::cast(obj));
1802
John Reck59135872010-11-02 12:39:01 -07001803 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1804 if (!maybe_obj->ToObject(&obj)) return false;
1805 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001806 set_code_map(Map::cast(obj));
1807
John Reck59135872010-11-02 12:39:01 -07001808 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1809 JSGlobalPropertyCell::kSize);
1810 if (!maybe_obj->ToObject(&obj)) return false;
1811 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001812 set_global_property_cell_map(Map::cast(obj));
1813
John Reck59135872010-11-02 12:39:01 -07001814 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1815 if (!maybe_obj->ToObject(&obj)) return false;
1816 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001817 set_one_pointer_filler_map(Map::cast(obj));
1818
John Reck59135872010-11-02 12:39:01 -07001819 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1820 if (!maybe_obj->ToObject(&obj)) return false;
1821 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001822 set_two_pointer_filler_map(Map::cast(obj));
1823
1824 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1825 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001826 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1827 if (!maybe_obj->ToObject(&obj)) return false;
1828 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001829 roots_[entry.index] = Map::cast(obj);
1830 }
1831
John Reck59135872010-11-02 12:39:01 -07001832 { MaybeObject* maybe_obj =
1833 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1834 if (!maybe_obj->ToObject(&obj)) return false;
1835 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001836 set_hash_table_map(Map::cast(obj));
1837
John Reck59135872010-11-02 12:39:01 -07001838 { MaybeObject* maybe_obj =
1839 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1840 if (!maybe_obj->ToObject(&obj)) return false;
1841 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001842 set_context_map(Map::cast(obj));
1843
John Reck59135872010-11-02 12:39:01 -07001844 { MaybeObject* maybe_obj =
1845 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1846 if (!maybe_obj->ToObject(&obj)) return false;
1847 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001848 set_catch_context_map(Map::cast(obj));
1849
John Reck59135872010-11-02 12:39:01 -07001850 { MaybeObject* maybe_obj =
1851 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1852 if (!maybe_obj->ToObject(&obj)) return false;
1853 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001854 Map* global_context_map = Map::cast(obj);
1855 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1856 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001857
John Reck59135872010-11-02 12:39:01 -07001858 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1859 SharedFunctionInfo::kAlignedSize);
1860 if (!maybe_obj->ToObject(&obj)) return false;
1861 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001862 set_shared_function_info_map(Map::cast(obj));
1863
Steve Block1e0659c2011-05-24 12:43:12 +01001864 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
1865 JSMessageObject::kSize);
1866 if (!maybe_obj->ToObject(&obj)) return false;
1867 }
1868 set_message_object_map(Map::cast(obj));
1869
Steve Block44f0eee2011-05-26 01:26:41 +01001870 ASSERT(!InNewSpace(empty_fixed_array()));
Steve Blocka7e24c12009-10-30 11:49:00 +00001871 return true;
1872}
1873
1874
John Reck59135872010-11-02 12:39:01 -07001875MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001876 // Statically ensure that it is safe to allocate heap numbers in paged
1877 // spaces.
1878 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1879 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1880
John Reck59135872010-11-02 12:39:01 -07001881 Object* result;
1882 { MaybeObject* maybe_result =
1883 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1884 if (!maybe_result->ToObject(&result)) return maybe_result;
1885 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001886
1887 HeapObject::cast(result)->set_map(heap_number_map());
1888 HeapNumber::cast(result)->set_value(value);
1889 return result;
1890}
1891
1892
John Reck59135872010-11-02 12:39:01 -07001893MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001894 // Use general version, if we're forced to always allocate.
1895 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1896
1897 // This version of AllocateHeapNumber is optimized for
1898 // allocation in new space.
1899 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1900 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001901 Object* result;
1902 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1903 if (!maybe_result->ToObject(&result)) return maybe_result;
1904 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001905 HeapObject::cast(result)->set_map(heap_number_map());
1906 HeapNumber::cast(result)->set_value(value);
1907 return result;
1908}
1909
1910
John Reck59135872010-11-02 12:39:01 -07001911MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1912 Object* result;
1913 { MaybeObject* maybe_result = AllocateRawCell();
1914 if (!maybe_result->ToObject(&result)) return maybe_result;
1915 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001916 HeapObject::cast(result)->set_map(global_property_cell_map());
1917 JSGlobalPropertyCell::cast(result)->set_value(value);
1918 return result;
1919}
1920
1921
John Reck59135872010-11-02 12:39:01 -07001922MaybeObject* Heap::CreateOddball(const char* to_string,
Steve Block44f0eee2011-05-26 01:26:41 +01001923 Object* to_number,
1924 byte kind) {
John Reck59135872010-11-02 12:39:01 -07001925 Object* result;
1926 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1927 if (!maybe_result->ToObject(&result)) return maybe_result;
1928 }
Steve Block44f0eee2011-05-26 01:26:41 +01001929 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
Steve Blocka7e24c12009-10-30 11:49:00 +00001930}
1931
1932
1933bool Heap::CreateApiObjects() {
1934 Object* obj;
1935
John Reck59135872010-11-02 12:39:01 -07001936 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1937 if (!maybe_obj->ToObject(&obj)) return false;
1938 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001939 set_neander_map(Map::cast(obj));
1940
Steve Block44f0eee2011-05-26 01:26:41 +01001941 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
John Reck59135872010-11-02 12:39:01 -07001942 if (!maybe_obj->ToObject(&obj)) return false;
1943 }
1944 Object* elements;
1945 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1946 if (!maybe_elements->ToObject(&elements)) return false;
1947 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001948 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1949 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1950 set_message_listeners(JSObject::cast(obj));
1951
1952 return true;
1953}
1954
1955
Steve Blocka7e24c12009-10-30 11:49:00 +00001956void Heap::CreateJSEntryStub() {
1957 JSEntryStub stub;
1958 set_js_entry_code(*stub.GetCode());
1959}
1960
1961
1962void Heap::CreateJSConstructEntryStub() {
1963 JSConstructEntryStub stub;
1964 set_js_construct_entry_code(*stub.GetCode());
1965}
1966
1967
1968void Heap::CreateFixedStubs() {
1969 // Here we create roots for fixed stubs. They are needed at GC
1970 // for cooking and uncooking (check out frames.cc).
1971 // This eliminates the need for doing a dictionary lookup in the
1972 // stub cache for these stubs.
1973 HandleScope scope;
1974 // gcc-4.4 has a problem generating correct code for the following snippet:
Steve Block44f0eee2011-05-26 01:26:41 +01001975 // { JSEntryStub stub;
1976 // js_entry_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001977 // }
Steve Block44f0eee2011-05-26 01:26:41 +01001978 // { JSConstructEntryStub stub;
1979 // js_construct_entry_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001980 // }
1981 // To work around the problem, make separate functions without inlining.
Steve Blocka7e24c12009-10-30 11:49:00 +00001982 Heap::CreateJSEntryStub();
1983 Heap::CreateJSConstructEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001984}
1985
1986
1987bool Heap::CreateInitialObjects() {
1988 Object* obj;
1989
1990 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001991 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1992 if (!maybe_obj->ToObject(&obj)) return false;
1993 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001994 set_minus_zero_value(obj);
1995 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1996
John Reck59135872010-11-02 12:39:01 -07001997 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1998 if (!maybe_obj->ToObject(&obj)) return false;
1999 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002000 set_nan_value(obj);
2001
John Reck59135872010-11-02 12:39:01 -07002002 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
2003 if (!maybe_obj->ToObject(&obj)) return false;
2004 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002005 set_undefined_value(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01002006 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
Steve Blocka7e24c12009-10-30 11:49:00 +00002007 ASSERT(!InNewSpace(undefined_value()));
2008
2009 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07002010 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2011 if (!maybe_obj->ToObject(&obj)) return false;
2012 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002013 // Don't use set_symbol_table() due to asserts.
2014 roots_[kSymbolTableRootIndex] = obj;
2015
2016 // Assign the print strings for oddballs after creating the symbol table.
John Reck59135872010-11-02 12:39:01 -07002017 Object* symbol;
2018 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
2019 if (!maybe_symbol->ToObject(&symbol)) return false;
2020 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002021 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
2022 Oddball::cast(undefined_value())->set_to_number(nan_value());
2023
Steve Blocka7e24c12009-10-30 11:49:00 +00002024 // Allocate the null_value
John Reck59135872010-11-02 12:39:01 -07002025 { MaybeObject* maybe_obj =
Steve Block44f0eee2011-05-26 01:26:41 +01002026 Oddball::cast(null_value())->Initialize("null",
2027 Smi::FromInt(0),
2028 Oddball::kNull);
John Reck59135872010-11-02 12:39:01 -07002029 if (!maybe_obj->ToObject(&obj)) return false;
2030 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002031
Steve Block44f0eee2011-05-26 01:26:41 +01002032 { MaybeObject* maybe_obj = CreateOddball("true",
2033 Smi::FromInt(1),
2034 Oddball::kTrue);
John Reck59135872010-11-02 12:39:01 -07002035 if (!maybe_obj->ToObject(&obj)) return false;
2036 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002037 set_true_value(obj);
2038
Steve Block44f0eee2011-05-26 01:26:41 +01002039 { MaybeObject* maybe_obj = CreateOddball("false",
2040 Smi::FromInt(0),
2041 Oddball::kFalse);
John Reck59135872010-11-02 12:39:01 -07002042 if (!maybe_obj->ToObject(&obj)) return false;
2043 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002044 set_false_value(obj);
2045
Steve Block44f0eee2011-05-26 01:26:41 +01002046 { MaybeObject* maybe_obj = CreateOddball("hole",
2047 Smi::FromInt(-1),
2048 Oddball::kTheHole);
John Reck59135872010-11-02 12:39:01 -07002049 if (!maybe_obj->ToObject(&obj)) return false;
2050 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002051 set_the_hole_value(obj);
2052
Ben Murdoch086aeea2011-05-13 15:57:08 +01002053 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Steve Block44f0eee2011-05-26 01:26:41 +01002054 Smi::FromInt(-4),
2055 Oddball::kArgumentMarker);
Ben Murdoch086aeea2011-05-13 15:57:08 +01002056 if (!maybe_obj->ToObject(&obj)) return false;
2057 }
2058 set_arguments_marker(obj);
2059
Steve Block44f0eee2011-05-26 01:26:41 +01002060 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2061 Smi::FromInt(-2),
2062 Oddball::kOther);
John Reck59135872010-11-02 12:39:01 -07002063 if (!maybe_obj->ToObject(&obj)) return false;
2064 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002065 set_no_interceptor_result_sentinel(obj);
2066
Steve Block44f0eee2011-05-26 01:26:41 +01002067 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2068 Smi::FromInt(-3),
2069 Oddball::kOther);
John Reck59135872010-11-02 12:39:01 -07002070 if (!maybe_obj->ToObject(&obj)) return false;
2071 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002072 set_termination_exception(obj);
2073
2074 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07002075 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2076 if (!maybe_obj->ToObject(&obj)) return false;
2077 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002078 set_empty_string(String::cast(obj));
2079
2080 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07002081 { MaybeObject* maybe_obj =
2082 LookupAsciiSymbol(constant_symbol_table[i].contents);
2083 if (!maybe_obj->ToObject(&obj)) return false;
2084 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002085 roots_[constant_symbol_table[i].index] = String::cast(obj);
2086 }
2087
2088 // Allocate the hidden symbol which is used to identify the hidden properties
2089 // in JSObjects. The hash code has a special value so that it will not match
2090 // the empty string when searching for the property. It cannot be part of the
2091 // loop above because it needs to be allocated manually with the special
2092 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2093 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07002094 { MaybeObject* maybe_obj =
2095 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2096 if (!maybe_obj->ToObject(&obj)) return false;
2097 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002098 hidden_symbol_ = String::cast(obj);
2099
2100 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07002101 { MaybeObject* maybe_obj =
2102 AllocateProxy((Address) &Accessors::ObjectPrototype);
2103 if (!maybe_obj->ToObject(&obj)) return false;
2104 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002105 set_prototype_accessors(Proxy::cast(obj));
2106
2107 // Allocate the code_stubs dictionary. The initial size is set to avoid
2108 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002109 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2110 if (!maybe_obj->ToObject(&obj)) return false;
2111 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002112 set_code_stubs(NumberDictionary::cast(obj));
2113
2114 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2115 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002116 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2117 if (!maybe_obj->ToObject(&obj)) return false;
2118 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002119 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2120
Kristian Monsen25f61362010-05-21 11:50:48 +01002121 set_instanceof_cache_function(Smi::FromInt(0));
2122 set_instanceof_cache_map(Smi::FromInt(0));
2123 set_instanceof_cache_answer(Smi::FromInt(0));
2124
Steve Blocka7e24c12009-10-30 11:49:00 +00002125 CreateFixedStubs();
2126
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002127 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002128 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2129 if (!maybe_obj->ToObject(&obj)) return false;
2130 }
Steve Block44f0eee2011-05-26 01:26:41 +01002131 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2132 obj);
John Reck59135872010-11-02 12:39:01 -07002133 if (!maybe_obj->ToObject(&obj)) return false;
2134 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002135 set_intrinsic_function_names(StringDictionary::cast(obj));
2136
Leon Clarkee46be812010-01-19 14:06:41 +00002137 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002138
Steve Block6ded16b2010-05-10 14:33:55 +01002139 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002140 { MaybeObject* maybe_obj =
2141 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2142 if (!maybe_obj->ToObject(&obj)) return false;
2143 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002144 set_single_character_string_cache(FixedArray::cast(obj));
2145
2146 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002147 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2148 if (!maybe_obj->ToObject(&obj)) return false;
2149 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002150 set_natives_source_cache(FixedArray::cast(obj));
2151
Steve Block44f0eee2011-05-26 01:26:41 +01002152 // Handling of script id generation is in FACTORY->NewScript.
Steve Blocka7e24c12009-10-30 11:49:00 +00002153 set_last_script_id(undefined_value());
2154
2155 // Initialize keyed lookup cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002156 isolate_->keyed_lookup_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002157
2158 // Initialize context slot cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002159 isolate_->context_slot_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002160
2161 // Initialize descriptor cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002162 isolate_->descriptor_lookup_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002163
2164 // Initialize compilation cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002165 isolate_->compilation_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002166
2167 return true;
2168}
2169
2170
John Reck59135872010-11-02 12:39:01 -07002171MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002172 // Compute the size of the number string cache based on the max heap size.
2173 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2174 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2175 int number_string_cache_size = max_semispace_size_ / 512;
2176 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
John Reck59135872010-11-02 12:39:01 -07002177 Object* obj;
2178 MaybeObject* maybe_obj =
2179 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2180 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2181 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002182}
2183
2184
2185void Heap::FlushNumberStringCache() {
2186 // Flush the number to string cache.
2187 int len = number_string_cache()->length();
2188 for (int i = 0; i < len; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01002189 number_string_cache()->set_undefined(this, i);
Leon Clarkee46be812010-01-19 14:06:41 +00002190 }
2191}
2192
2193
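// The double hash simply XOR-folds the low and high halves of the IEEE-754
// bit pattern into one int. For example, 1.0 has the bit pattern
// 0x3FF0000000000000, so double_get_hash(1.0) is
// 0x00000000 ^ 0x3FF00000 = 0x3FF00000; callers mask the result down to the
// cache size.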
Steve Blocka7e24c12009-10-30 11:49:00 +00002194static inline int double_get_hash(double d) {
2195 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002196 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002197}
2198
2199
2200static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002201 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002202}
2203
2204
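// The number-string cache is a flat FixedArray treated as a direct-mapped
// table of (number, string) pairs with no collision chaining: with
// mask = (length() >> 1) - 1, a number hashing to h keeps its key in slot
// 2 * h and the cached string in slot 2 * h + 1, and a colliding number
// simply overwrites the previous pair.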
Steve Blocka7e24c12009-10-30 11:49:00 +00002205Object* Heap::GetNumberStringCache(Object* number) {
2206 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002207 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002208 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002209 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002210 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002211 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002212 }
2213 Object* key = number_string_cache()->get(hash * 2);
2214 if (key == number) {
2215 return String::cast(number_string_cache()->get(hash * 2 + 1));
2216 } else if (key->IsHeapNumber() &&
2217 number->IsHeapNumber() &&
2218 key->Number() == number->Number()) {
2219 return String::cast(number_string_cache()->get(hash * 2 + 1));
2220 }
2221 return undefined_value();
2222}
2223
2224
2225void Heap::SetNumberStringCache(Object* number, String* string) {
2226 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002227 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002228 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002229 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002230 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002231 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002232 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002233 number_string_cache()->set(hash * 2, number);
2234 }
2235 number_string_cache()->set(hash * 2 + 1, string);
2236}
2237
2238
John Reck59135872010-11-02 12:39:01 -07002239MaybeObject* Heap::NumberToString(Object* number,
2240 bool check_number_string_cache) {
Steve Block44f0eee2011-05-26 01:26:41 +01002241 isolate_->counters()->number_to_string_runtime()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002242 if (check_number_string_cache) {
2243 Object* cached = GetNumberStringCache(number);
2244 if (cached != undefined_value()) {
2245 return cached;
2246 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002247 }
2248
2249 char arr[100];
2250 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2251 const char* str;
2252 if (number->IsSmi()) {
2253 int num = Smi::cast(number)->value();
2254 str = IntToCString(num, buffer);
2255 } else {
2256 double num = HeapNumber::cast(number)->value();
2257 str = DoubleToCString(num, buffer);
2258 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002259
John Reck59135872010-11-02 12:39:01 -07002260 Object* js_string;
2261 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2262 if (maybe_js_string->ToObject(&js_string)) {
2263 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002264 }
John Reck59135872010-11-02 12:39:01 -07002265 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002266}
2267
2268
Steve Block3ce2e202009-11-05 08:53:23 +00002269Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2270 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2271}
2272
2273
2274Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2275 ExternalArrayType array_type) {
2276 switch (array_type) {
2277 case kExternalByteArray:
2278 return kExternalByteArrayMapRootIndex;
2279 case kExternalUnsignedByteArray:
2280 return kExternalUnsignedByteArrayMapRootIndex;
2281 case kExternalShortArray:
2282 return kExternalShortArrayMapRootIndex;
2283 case kExternalUnsignedShortArray:
2284 return kExternalUnsignedShortArrayMapRootIndex;
2285 case kExternalIntArray:
2286 return kExternalIntArrayMapRootIndex;
2287 case kExternalUnsignedIntArray:
2288 return kExternalUnsignedIntArrayMapRootIndex;
2289 case kExternalFloatArray:
2290 return kExternalFloatArrayMapRootIndex;
Steve Block44f0eee2011-05-26 01:26:41 +01002291 case kExternalPixelArray:
2292 return kExternalPixelArrayMapRootIndex;
Steve Block3ce2e202009-11-05 08:53:23 +00002293 default:
2294 UNREACHABLE();
2295 return kUndefinedValueRootIndex;
2296 }
2297}
2298
2299
John Reck59135872010-11-02 12:39:01 -07002300MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002301 // We need to distinguish the minus zero value and this cannot be
2302 // done after conversion to int. Doing this by comparing bit
2303 // patterns is faster than using fpclassify() et al.
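  // For example, NumberFromDouble(-0.0) must produce a heap number: FastD2I
  // would turn -0.0 into the integer 0, and Smi::FromInt(0) would silently
  // drop the sign.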
2304 static const DoubleRepresentation minus_zero(-0.0);
2305
2306 DoubleRepresentation rep(value);
2307 if (rep.bits == minus_zero.bits) {
2308 return AllocateHeapNumber(-0.0, pretenure);
2309 }
2310
2311 int int_value = FastD2I(value);
2312 if (value == int_value && Smi::IsValid(int_value)) {
2313 return Smi::FromInt(int_value);
2314 }
2315
2316 // Materialize the value in the heap.
2317 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002318}
2319
2320
John Reck59135872010-11-02 12:39:01 -07002321MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002322 // Statically ensure that it is safe to allocate proxies in paged spaces.
2323 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2324 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002325 Object* result;
2326 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2327 if (!maybe_result->ToObject(&result)) return maybe_result;
2328 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002329
2330 Proxy::cast(result)->set_proxy(proxy);
2331 return result;
2332}
2333
2334
John Reck59135872010-11-02 12:39:01 -07002335MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2336 Object* result;
2337 { MaybeObject* maybe_result =
2338 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2339 if (!maybe_result->ToObject(&result)) return maybe_result;
2340 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002341
2342 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2343 share->set_name(name);
Steve Block44f0eee2011-05-26 01:26:41 +01002344 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
Steve Blocka7e24c12009-10-30 11:49:00 +00002345 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002346 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Block44f0eee2011-05-26 01:26:41 +01002347 Code* construct_stub = isolate_->builtins()->builtin(
2348 Builtins::kJSConstructStubGeneric);
Steve Blocka7e24c12009-10-30 11:49:00 +00002349 share->set_construct_stub(construct_stub);
2350 share->set_expected_nof_properties(0);
2351 share->set_length(0);
2352 share->set_formal_parameter_count(0);
2353 share->set_instance_class_name(Object_symbol());
2354 share->set_function_data(undefined_value());
2355 share->set_script(undefined_value());
2356 share->set_start_position_and_type(0);
2357 share->set_debug_info(undefined_value());
2358 share->set_inferred_name(empty_string());
2359 share->set_compiler_hints(0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002360 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002361 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002362 share->set_this_property_assignments_count(0);
2363 share->set_this_property_assignments(undefined_value());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002364 share->set_opt_count(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002365 share->set_num_literals(0);
2366 share->set_end_position(0);
2367 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002368 return result;
2369}
2370
2371
Steve Block1e0659c2011-05-24 12:43:12 +01002372MaybeObject* Heap::AllocateJSMessageObject(String* type,
2373 JSArray* arguments,
2374 int start_position,
2375 int end_position,
2376 Object* script,
2377 Object* stack_trace,
2378 Object* stack_frames) {
2379 Object* result;
2380 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2381 if (!maybe_result->ToObject(&result)) return maybe_result;
2382 }
2383 JSMessageObject* message = JSMessageObject::cast(result);
2384 message->set_properties(Heap::empty_fixed_array());
2385 message->set_elements(Heap::empty_fixed_array());
2386 message->set_type(type);
2387 message->set_arguments(arguments);
2388 message->set_start_position(start_position);
2389 message->set_end_position(end_position);
2390 message->set_script(script);
2391 message->set_stack_trace(stack_trace);
2392 message->set_stack_frames(stack_frames);
2393 return result;
2394}
2395
2396
2397
Steve Blockd0582a62009-12-15 09:54:21 +00002398// Returns true for a character in a range. Both limits are inclusive.
2399static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2400 // This makes use of unsigned wraparound.
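  // For example, with from = '0' and to = '9': for character = '/', the
  // subtraction character - from wraps around to a huge unsigned value, so
  // the single comparison below rejects characters smaller than 'from' as
  // well as those larger than 'to'.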
2401 return character - from <= to - from;
2402}
2403
2404
John Reck59135872010-11-02 12:39:01 -07002405MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
Steve Block44f0eee2011-05-26 01:26:41 +01002406 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07002407 uint32_t c1,
2408 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002409 String* symbol;
2410 // Numeric strings have a different hash algorithm not known by
2411 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2412 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
Steve Block44f0eee2011-05-26 01:26:41 +01002413 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002414 return symbol;
2415  // Now that we know the length is 2, we might as well use that fact
2416 // when building the new string.
2417 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2418 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
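    // Since kMaxAsciiCharCodeU + 1 is a power of two, the bitwise OR of the
    // two characters stays within the ASCII range exactly when both do, so
    // one comparison covers both characters.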
John Reck59135872010-11-02 12:39:01 -07002419 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002420 { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
John Reck59135872010-11-02 12:39:01 -07002421 if (!maybe_result->ToObject(&result)) return maybe_result;
2422 }
Steve Blockd0582a62009-12-15 09:54:21 +00002423 char* dest = SeqAsciiString::cast(result)->GetChars();
2424 dest[0] = c1;
2425 dest[1] = c2;
2426 return result;
2427 } else {
John Reck59135872010-11-02 12:39:01 -07002428 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002429 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
John Reck59135872010-11-02 12:39:01 -07002430 if (!maybe_result->ToObject(&result)) return maybe_result;
2431 }
Steve Blockd0582a62009-12-15 09:54:21 +00002432 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2433 dest[0] = c1;
2434 dest[1] = c2;
2435 return result;
2436 }
2437}
2438
2439
John Reck59135872010-11-02 12:39:01 -07002440MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002441 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002442 if (first_length == 0) {
2443 return second;
2444 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002445
2446 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002447 if (second_length == 0) {
2448 return first;
2449 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002450
2451 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002452
2453  // Optimization for two-character strings often used as keys in a
2454  // decompression dictionary. Check whether we already have the string in
2455  // the symbol table to prevent creation of many unnecessary strings.
2456 if (length == 2) {
2457 unsigned c1 = first->Get(0);
2458 unsigned c2 = second->Get(0);
Steve Block44f0eee2011-05-26 01:26:41 +01002459 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blockd0582a62009-12-15 09:54:21 +00002460 }
2461
Steve Block6ded16b2010-05-10 14:33:55 +01002462 bool first_is_ascii = first->IsAsciiRepresentation();
2463 bool second_is_ascii = second->IsAsciiRepresentation();
2464 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002465
2466 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002467 // of the new cons string is too large.
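  // (A negative length here can only mean the addition above overflowed.)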
2468 if (length > String::kMaxLength || length < 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01002469 isolate()->context()->mark_out_of_memory();
Steve Blocka7e24c12009-10-30 11:49:00 +00002470 return Failure::OutOfMemoryException();
2471 }
2472
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002473 bool is_ascii_data_in_two_byte_string = false;
2474 if (!is_ascii) {
2475 // At least one of the strings uses two-byte representation so we
2476 // can't use the fast case code for short ascii strings below, but
2477 // we can try to save memory if all chars actually fit in ascii.
2478 is_ascii_data_in_two_byte_string =
2479 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2480 if (is_ascii_data_in_two_byte_string) {
Steve Block44f0eee2011-05-26 01:26:41 +01002481 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002482 }
2483 }
2484
Steve Blocka7e24c12009-10-30 11:49:00 +00002485 // If the resulting string is small make a flat string.
2486 if (length < String::kMinNonFlatLength) {
2487 ASSERT(first->IsFlat());
2488 ASSERT(second->IsFlat());
2489 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002490 Object* result;
2491 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2492 if (!maybe_result->ToObject(&result)) return maybe_result;
2493 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002494 // Copy the characters into the new object.
2495 char* dest = SeqAsciiString::cast(result)->GetChars();
2496 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002497 const char* src;
2498 if (first->IsExternalString()) {
2499 src = ExternalAsciiString::cast(first)->resource()->data();
2500 } else {
2501 src = SeqAsciiString::cast(first)->GetChars();
2502 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002503 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2504 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002505 if (second->IsExternalString()) {
2506 src = ExternalAsciiString::cast(second)->resource()->data();
2507 } else {
2508 src = SeqAsciiString::cast(second)->GetChars();
2509 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002510 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2511 return result;
2512 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002513 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002514 Object* result;
2515 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2516 if (!maybe_result->ToObject(&result)) return maybe_result;
2517 }
Steve Block6ded16b2010-05-10 14:33:55 +01002518 // Copy the characters into the new object.
2519 char* dest = SeqAsciiString::cast(result)->GetChars();
2520 String::WriteToFlat(first, dest, 0, first_length);
2521 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block44f0eee2011-05-26 01:26:41 +01002522 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002523 return result;
2524 }
2525
John Reck59135872010-11-02 12:39:01 -07002526 Object* result;
2527 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2528 if (!maybe_result->ToObject(&result)) return maybe_result;
2529 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002530 // Copy the characters into the new object.
2531 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2532 String::WriteToFlat(first, dest, 0, first_length);
2533 String::WriteToFlat(second, dest + first_length, 0, second_length);
2534 return result;
2535 }
2536 }
2537
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002538 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2539 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002540
John Reck59135872010-11-02 12:39:01 -07002541 Object* result;
2542 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2543 if (!maybe_result->ToObject(&result)) return maybe_result;
2544 }
Leon Clarke4515c472010-02-03 11:58:03 +00002545
2546 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002547 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002548 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002549 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002550 cons_string->set_hash_field(String::kEmptyHashField);
2551 cons_string->set_first(first, mode);
2552 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002553 return result;
2554}
2555
2556
John Reck59135872010-11-02 12:39:01 -07002557MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002558 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002559 int end,
2560 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002561 int length = end - start;
2562
2563 if (length == 1) {
Steve Block44f0eee2011-05-26 01:26:41 +01002564 return LookupSingleCharacterStringFromCode(buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002565 } else if (length == 2) {
2566    // Optimization for two-character strings often used as keys in a
2567    // decompression dictionary. Check whether we already have the string in
2568    // the symbol table to prevent creation of many unnecessary strings.
2569 unsigned c1 = buffer->Get(start);
2570 unsigned c2 = buffer->Get(start + 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002571 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002572 }
2573
2574 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002575 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002576
John Reck59135872010-11-02 12:39:01 -07002577 Object* result;
2578 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2579        ? AllocateRawAsciiString(length, pretenure)
2580 : AllocateRawTwoByteString(length, pretenure);
2581 if (!maybe_result->ToObject(&result)) return maybe_result;
2582 }
Steve Blockd0582a62009-12-15 09:54:21 +00002583 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002584 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002585 if (buffer->IsAsciiRepresentation()) {
2586 ASSERT(string_result->IsAsciiRepresentation());
2587 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2588 String::WriteToFlat(buffer, dest, start, end);
2589 } else {
2590 ASSERT(string_result->IsTwoByteRepresentation());
2591 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2592 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002593 }
Steve Blockd0582a62009-12-15 09:54:21 +00002594
Steve Blocka7e24c12009-10-30 11:49:00 +00002595 return result;
2596}
2597
2598
John Reck59135872010-11-02 12:39:01 -07002599MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002600 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002601 size_t length = resource->length();
2602 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002603 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002604 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002605 }
2606
Steve Blockd0582a62009-12-15 09:54:21 +00002607 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002608 Object* result;
2609 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2610 if (!maybe_result->ToObject(&result)) return maybe_result;
2611 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002612
2613 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002614 external_string->set_length(static_cast<int>(length));
2615 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002616 external_string->set_resource(resource);
2617
2618 return result;
2619}
2620
2621
John Reck59135872010-11-02 12:39:01 -07002622MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002623 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002624 size_t length = resource->length();
2625 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002626 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002627 return Failure::OutOfMemoryException();
2628 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002629
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002630 // For small strings we check whether the resource contains only
Steve Block9fac8402011-05-12 15:51:54 +01002631 // ASCII characters. If yes, we use a different string map.
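  // Longer strings skip the scan, presumably to keep allocation cheap, and
  // use the generic two-byte map even if their data happens to be ASCII.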
2632 static const size_t kAsciiCheckLengthLimit = 32;
2633 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2634 String::IsAscii(resource->data(), static_cast<int>(length));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002635 Map* map = is_ascii ?
Steve Block44f0eee2011-05-26 01:26:41 +01002636 external_string_with_ascii_data_map() : external_string_map();
John Reck59135872010-11-02 12:39:01 -07002637 Object* result;
2638 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2639 if (!maybe_result->ToObject(&result)) return maybe_result;
2640 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002641
2642 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002643 external_string->set_length(static_cast<int>(length));
2644 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002645 external_string->set_resource(resource);
2646
2647 return result;
2648}
2649
2650
John Reck59135872010-11-02 12:39:01 -07002651MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
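  // Codes in the ASCII range are looked up in (and cached into) the
  // single-character string cache; larger code units always get a fresh
  // one-character two-byte string.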
Steve Blocka7e24c12009-10-30 11:49:00 +00002652 if (code <= String::kMaxAsciiCharCode) {
Steve Block44f0eee2011-05-26 01:26:41 +01002653 Object* value = single_character_string_cache()->get(code);
2654 if (value != undefined_value()) return value;
Steve Blocka7e24c12009-10-30 11:49:00 +00002655
2656 char buffer[1];
2657 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002658 Object* result;
2659 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002660
John Reck59135872010-11-02 12:39:01 -07002661 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002662 single_character_string_cache()->set(code, result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002663 return result;
2664 }
2665
John Reck59135872010-11-02 12:39:01 -07002666 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002667 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
John Reck59135872010-11-02 12:39:01 -07002668 if (!maybe_result->ToObject(&result)) return maybe_result;
2669 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002670 String* answer = String::cast(result);
2671 answer->Set(0, code);
2672 return answer;
2673}
2674
2675
John Reck59135872010-11-02 12:39:01 -07002676MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002677 if (length < 0 || length > ByteArray::kMaxLength) {
2678 return Failure::OutOfMemoryException();
2679 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002680 if (pretenure == NOT_TENURED) {
2681 return AllocateByteArray(length);
2682 }
2683 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002684 Object* result;
2685 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2686 ? old_data_space_->AllocateRaw(size)
2687 : lo_space_->AllocateRaw(size);
2688 if (!maybe_result->ToObject(&result)) return maybe_result;
2689 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002690
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002691 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2692 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002693 return result;
2694}
2695
2696
John Reck59135872010-11-02 12:39:01 -07002697MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002698 if (length < 0 || length > ByteArray::kMaxLength) {
2699 return Failure::OutOfMemoryException();
2700 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002701 int size = ByteArray::SizeFor(length);
2702 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002703 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002704 Object* result;
2705 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2706 if (!maybe_result->ToObject(&result)) return maybe_result;
2707 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002708
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002709 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2710 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002711 return result;
2712}
2713
2714
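// Overwrites a freed region with a filler object so that the heap stays
// iterable: the gap gets a valid map and a size that iteration can skip.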
2715void Heap::CreateFillerObjectAt(Address addr, int size) {
2716 if (size == 0) return;
2717 HeapObject* filler = HeapObject::FromAddress(addr);
2718 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002719 filler->set_map(one_pointer_filler_map());
2720 } else if (size == 2 * kPointerSize) {
2721 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002722 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002723 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002724 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2725 }
2726}
2727
2728
John Reck59135872010-11-02 12:39:01 -07002729MaybeObject* Heap::AllocateExternalArray(int length,
2730 ExternalArrayType array_type,
2731 void* external_pointer,
2732 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002733 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002734 Object* result;
2735 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2736 space,
2737 OLD_DATA_SPACE);
2738 if (!maybe_result->ToObject(&result)) return maybe_result;
2739 }
Steve Block3ce2e202009-11-05 08:53:23 +00002740
2741 reinterpret_cast<ExternalArray*>(result)->set_map(
2742 MapForExternalArrayType(array_type));
2743 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2744 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2745 external_pointer);
2746
2747 return result;
2748}
2749
2750
John Reck59135872010-11-02 12:39:01 -07002751MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2752 Code::Flags flags,
Steve Block44f0eee2011-05-26 01:26:41 +01002753 Handle<Object> self_reference,
2754 bool immovable) {
Leon Clarkeac952652010-07-15 11:15:24 +01002755 // Allocate ByteArray before the Code object, so that we do not risk
2756 // leaving uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002757 Object* reloc_info;
2758 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2759 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2760 }
Leon Clarkeac952652010-07-15 11:15:24 +01002761
Steve Block44f0eee2011-05-26 01:26:41 +01002762 // Compute size.
Leon Clarkeac952652010-07-15 11:15:24 +01002763 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002764 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002765 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002766 MaybeObject* maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002767 // Large code objects and code objects which should stay at a fixed address
2768 // are allocated in large object space.
2769 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
John Reck59135872010-11-02 12:39:01 -07002770 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002771 } else {
John Reck59135872010-11-02 12:39:01 -07002772 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002773 }
2774
John Reck59135872010-11-02 12:39:01 -07002775 Object* result;
2776 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002777
2778 // Initialize the object
2779 HeapObject::cast(result)->set_map(code_map());
2780 Code* code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002781 ASSERT(!isolate_->code_range()->exists() ||
2782 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002783 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002784 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002785 code->set_flags(flags);
Ben Murdochb8e0da22011-05-16 14:20:40 +01002786 if (code->is_call_stub() || code->is_keyed_call_stub()) {
2787 code->set_check_type(RECEIVER_MAP_CHECK);
2788 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002789 code->set_deoptimization_data(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002790  // Allow self-references to the created code object by patching the handle to
2791 // point to the newly allocated Code object.
2792 if (!self_reference.is_null()) {
2793 *(self_reference.location()) = code;
2794 }
2795 // Migrate generated code.
2796 // The generated code can contain Object** values (typically from handles)
2797 // that are dereferenced during the copy to point directly to the actual heap
2798 // objects. These pointers can include references to the code object itself,
2799 // through the self_reference parameter.
2800 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002801
2802#ifdef DEBUG
2803 code->Verify();
2804#endif
2805 return code;
2806}
2807
2808
John Reck59135872010-11-02 12:39:01 -07002809MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002810 // Allocate an object the same size as the code object.
2811 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002812 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002813 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002814 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002815 } else {
John Reck59135872010-11-02 12:39:01 -07002816 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002817 }
2818
John Reck59135872010-11-02 12:39:01 -07002819 Object* result;
2820 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002821
2822 // Copy code object.
2823 Address old_addr = code->address();
2824 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002825 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002826 // Relocate the copy.
2827 Code* new_code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002828 ASSERT(!isolate_->code_range()->exists() ||
2829 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002830 new_code->Relocate(new_addr - old_addr);
2831 return new_code;
2832}
2833
2834
John Reck59135872010-11-02 12:39:01 -07002835MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002836 // Allocate ByteArray before the Code object, so that we do not risk
2837 // leaving uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002838 Object* reloc_info_array;
2839 { MaybeObject* maybe_reloc_info_array =
2840 AllocateByteArray(reloc_info.length(), TENURED);
2841 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2842 return maybe_reloc_info_array;
2843 }
2844 }
Leon Clarkeac952652010-07-15 11:15:24 +01002845
2846 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002847
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002848 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002849
2850 Address old_addr = code->address();
2851
2852 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002853 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002854
John Reck59135872010-11-02 12:39:01 -07002855 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002856 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002857 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002858 } else {
John Reck59135872010-11-02 12:39:01 -07002859 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002860 }
2861
John Reck59135872010-11-02 12:39:01 -07002862 Object* result;
2863 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002864
2865 // Copy code object.
2866 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2867
2868 // Copy header and instructions.
2869 memcpy(new_addr, old_addr, relocation_offset);
2870
Steve Block6ded16b2010-05-10 14:33:55 +01002871 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002872 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002873
Leon Clarkeac952652010-07-15 11:15:24 +01002874 // Copy patched rinfo.
2875 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002876
2877 // Relocate the copy.
Steve Block44f0eee2011-05-26 01:26:41 +01002878 ASSERT(!isolate_->code_range()->exists() ||
2879 isolate_->code_range()->contains(code->address()));
Steve Block6ded16b2010-05-10 14:33:55 +01002880 new_code->Relocate(new_addr - old_addr);
2881
2882#ifdef DEBUG
2883  new_code->Verify();
2884#endif
2885 return new_code;
2886}
2887
2888
John Reck59135872010-11-02 12:39:01 -07002889MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002890 ASSERT(gc_state_ == NOT_IN_GC);
2891 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002892 // If allocation failures are disallowed, we may allocate in a different
2893 // space when new space is full and the object is not a large object.
2894 AllocationSpace retry_space =
2895 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002896 Object* result;
2897 { MaybeObject* maybe_result =
2898 AllocateRaw(map->instance_size(), space, retry_space);
2899 if (!maybe_result->ToObject(&result)) return maybe_result;
2900 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002901 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002902#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01002903 isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
Steve Block3ce2e202009-11-05 08:53:23 +00002904#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002905 return result;
2906}
2907
2908
John Reck59135872010-11-02 12:39:01 -07002909MaybeObject* Heap::InitializeFunction(JSFunction* function,
2910 SharedFunctionInfo* shared,
2911 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002912 ASSERT(!prototype->IsMap());
2913 function->initialize_properties();
2914 function->initialize_elements();
2915 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002916 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002917 function->set_prototype_or_initial_map(prototype);
2918 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002919 function->set_literals(empty_fixed_array());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002920 function->set_next_function_link(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002921 return function;
2922}
2923
2924
John Reck59135872010-11-02 12:39:01 -07002925MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002926 // Allocate the prototype. Make sure to use the object function
2927 // from the function's context, since the function can be from a
2928 // different context.
2929 JSFunction* object_function =
2930 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002931 Object* prototype;
2932 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2933 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2934 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002935 // When creating the prototype for the function we must set its
2936 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002937 Object* result;
2938 { MaybeObject* maybe_result =
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002939 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
2940 constructor_symbol(), function, DONT_ENUM);
John Reck59135872010-11-02 12:39:01 -07002941 if (!maybe_result->ToObject(&result)) return maybe_result;
2942 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002943 return prototype;
2944}
2945
2946
John Reck59135872010-11-02 12:39:01 -07002947MaybeObject* Heap::AllocateFunction(Map* function_map,
2948 SharedFunctionInfo* shared,
2949 Object* prototype,
2950 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002951 AllocationSpace space =
2952 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002953 Object* result;
2954 { MaybeObject* maybe_result = Allocate(function_map, space);
2955 if (!maybe_result->ToObject(&result)) return maybe_result;
2956 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002957 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2958}
2959
2960
John Reck59135872010-11-02 12:39:01 -07002961MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002962 // To get fast allocation and map sharing for arguments objects we
2963 // allocate them based on an arguments boilerplate.
2964
Steve Block44f0eee2011-05-26 01:26:41 +01002965 JSObject* boilerplate;
2966 int arguments_object_size;
2967 bool strict_mode_callee = callee->IsJSFunction() &&
2968 JSFunction::cast(callee)->shared()->strict_mode();
2969 if (strict_mode_callee) {
2970 boilerplate =
2971 isolate()->context()->global_context()->
2972 strict_mode_arguments_boilerplate();
2973 arguments_object_size = kArgumentsObjectSizeStrict;
2974 } else {
2975 boilerplate =
2976 isolate()->context()->global_context()->arguments_boilerplate();
2977 arguments_object_size = kArgumentsObjectSize;
2978 }
2979
Steve Blocka7e24c12009-10-30 11:49:00 +00002980 // This calls Copy directly rather than using Heap::AllocateRaw so we
2981 // duplicate the check here.
2982 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2983
Leon Clarkee46be812010-01-19 14:06:41 +00002984 // Check that the size of the boilerplate matches our
2985 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2986 // on the size being a known constant.
Steve Block44f0eee2011-05-26 01:26:41 +01002987 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
Leon Clarkee46be812010-01-19 14:06:41 +00002988
2989 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002990 Object* result;
2991 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01002992 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
John Reck59135872010-11-02 12:39:01 -07002993 if (!maybe_result->ToObject(&result)) return maybe_result;
2994 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002995
2996 // Copy the content. The arguments boilerplate doesn't have any
2997 // fields that point to new space so it's safe to skip the write
2998 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002999 CopyBlock(HeapObject::cast(result)->address(),
3000 boilerplate->address(),
Steve Block44f0eee2011-05-26 01:26:41 +01003001 JSObject::kHeaderSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003002
Steve Block44f0eee2011-05-26 01:26:41 +01003003 // Set the length property.
3004 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
Steve Blocka7e24c12009-10-30 11:49:00 +00003005 Smi::FromInt(length),
3006 SKIP_WRITE_BARRIER);
Steve Block44f0eee2011-05-26 01:26:41 +01003007 // Set the callee property for non-strict mode arguments object only.
3008 if (!strict_mode_callee) {
3009 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3010 callee);
3011 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003012
3013 // Check the state of the object
3014 ASSERT(JSObject::cast(result)->HasFastProperties());
3015 ASSERT(JSObject::cast(result)->HasFastElements());
3016
3017 return result;
3018}
3019
3020
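// Only adjacent entries are compared, so the descriptor array must already
// be sorted by key for this check to find every duplicate.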
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003021static bool HasDuplicates(DescriptorArray* descriptors) {
3022 int count = descriptors->number_of_descriptors();
3023 if (count > 1) {
3024 String* prev_key = descriptors->GetKey(0);
3025 for (int i = 1; i != count; i++) {
3026 String* current_key = descriptors->GetKey(i);
3027 if (prev_key == current_key) return true;
3028 prev_key = current_key;
3029 }
3030 }
3031 return false;
3032}
3033
3034
John Reck59135872010-11-02 12:39:01 -07003035MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003036 ASSERT(!fun->has_initial_map());
3037
3038 // First create a new map with the size and number of in-object properties
3039 // suggested by the function.
3040 int instance_size = fun->shared()->CalculateInstanceSize();
3041 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07003042 Object* map_obj;
Steve Block44f0eee2011-05-26 01:26:41 +01003043 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
John Reck59135872010-11-02 12:39:01 -07003044 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3045 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003046
3047 // Fetch or allocate prototype.
3048 Object* prototype;
3049 if (fun->has_instance_prototype()) {
3050 prototype = fun->instance_prototype();
3051 } else {
John Reck59135872010-11-02 12:39:01 -07003052 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3053 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3054 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003055 }
3056 Map* map = Map::cast(map_obj);
3057 map->set_inobject_properties(in_object_properties);
3058 map->set_unused_property_fields(in_object_properties);
3059 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01003060 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003061
Andrei Popescu402d9372010-02-26 13:31:12 +00003062 // If the function has only simple this property assignments add
3063 // field descriptors for these to the initial map as the object
3064 // cannot be constructed without having these properties. Guard by
3065 // the inline_new flag so we only change the map if we generate a
3066 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00003067 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00003068 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003069 int count = fun->shared()->this_property_assignments_count();
3070 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003071 // Inline constructor can only handle inobject properties.
3072 fun->shared()->ForbidInlineConstructor();
3073 } else {
John Reck59135872010-11-02 12:39:01 -07003074 Object* descriptors_obj;
3075 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3076 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3077 return maybe_descriptors_obj;
3078 }
3079 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003080 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3081 for (int i = 0; i < count; i++) {
3082 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3083 ASSERT(name->IsSymbol());
3084 FieldDescriptor field(name, i, NONE);
3085 field.SetEnumerationIndex(i);
3086 descriptors->Set(i, &field);
3087 }
3088 descriptors->SetNextEnumerationIndex(count);
3089 descriptors->SortUnchecked();
3090
3091 // The descriptors may contain duplicates because the compiler does not
3092 // guarantee the uniqueness of property names (it would have required
3093 // quadratic time). Once the descriptors are sorted we can check for
3094 // duplicates in linear time.
3095 if (HasDuplicates(descriptors)) {
3096 fun->shared()->ForbidInlineConstructor();
3097 } else {
3098 map->set_instance_descriptors(descriptors);
3099 map->set_pre_allocated_property_fields(count);
3100 map->set_unused_property_fields(in_object_properties - count);
3101 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003102 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003103 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003104
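  // Start tracking how much of the pre-allocated in-object space is actually
  // used, so that unused slack can be reclaimed once construction settles.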
3105 fun->shared()->StartInobjectSlackTracking(map);
3106
Steve Blocka7e24c12009-10-30 11:49:00 +00003107 return map;
3108}
3109
3110
3111void Heap::InitializeJSObjectFromMap(JSObject* obj,
3112 FixedArray* properties,
3113 Map* map) {
3114 obj->set_properties(properties);
3115 obj->initialize_elements();
3116 // TODO(1240798): Initialize the object's body using valid initial values
3117 // according to the object's initial map. For example, if the map's
3118 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3119  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3120  // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3121 // verification code has to cope with (temporarily) invalid objects. See
3122  // for example, JSArray::JSArrayVerify.
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003123 Object* filler;
3124 // We cannot always fill with one_pointer_filler_map because objects
3125 // created from API functions expect their internal fields to be initialized
3126 // with undefined_value.
3127 if (map->constructor()->IsJSFunction() &&
3128 JSFunction::cast(map->constructor())->shared()->
3129 IsInobjectSlackTrackingInProgress()) {
3130 // We might want to shrink the object later.
3131 ASSERT(obj->GetInternalFieldCount() == 0);
3132 filler = Heap::one_pointer_filler_map();
3133 } else {
3134 filler = Heap::undefined_value();
3135 }
3136 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00003137}
3138
3139
John Reck59135872010-11-02 12:39:01 -07003140MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003141 // JSFunctions should be allocated using AllocateFunction to be
3142 // properly initialized.
3143 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3144
Steve Block8defd9f2010-07-08 12:39:36 +01003145 // Both types of global objects should be allocated using
3146 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003147 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3148 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3149
3150 // Allocate the backing storage for the properties.
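  // (Roughly: the property fields the map expects minus those that fit
  // in-object; only the overflow needs a backing array.)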
3151 int prop_size =
3152 map->pre_allocated_property_fields() +
3153 map->unused_property_fields() -
3154 map->inobject_properties();
3155 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003156 Object* properties;
3157 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3158 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3159 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003160
3161 // Allocate the JSObject.
3162 AllocationSpace space =
3163 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3164 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003165 Object* obj;
3166 { MaybeObject* maybe_obj = Allocate(map, space);
3167 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3168 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003169
3170 // Initialize the JSObject.
3171 InitializeJSObjectFromMap(JSObject::cast(obj),
3172 FixedArray::cast(properties),
3173 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003174 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003175 return obj;
3176}
3177
3178
John Reck59135872010-11-02 12:39:01 -07003179MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3180 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003181 // Allocate the initial map if absent.
3182 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003183 Object* initial_map;
3184 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3185 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3186 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003187 constructor->set_initial_map(Map::cast(initial_map));
3188 Map::cast(initial_map)->set_constructor(constructor);
3189 }
3190  // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003191 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003192 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003193#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003194 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003195 Object* non_failure;
3196 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3197#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003198 return result;
3199}
3200
3201
John Reck59135872010-11-02 12:39:01 -07003202MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003203 ASSERT(constructor->has_initial_map());
3204 Map* map = constructor->initial_map();
3205
3206 // Make sure no field properties are described in the initial map.
3207 // This guarantees us that normalizing the properties does not
3208 // require us to change property values to JSGlobalPropertyCells.
3209 ASSERT(map->NextFreePropertyIndex() == 0);
3210
3211 // Make sure we don't have a ton of pre-allocated slots in the
3212 // global objects. They will be unused once we normalize the object.
3213 ASSERT(map->unused_property_fields() == 0);
3214 ASSERT(map->inobject_properties() == 0);
3215
3216 // Initial size of the backing store to avoid resize of the storage during
3217  // bootstrapping. The size differs between the JS global object and the
3218 // builtins object.
3219 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3220
3221 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003222 Object* obj;
3223 { MaybeObject* maybe_obj =
3224 StringDictionary::Allocate(
3225 map->NumberOfDescribedProperties() * 2 + initial_size);
3226 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3227 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003228 StringDictionary* dictionary = StringDictionary::cast(obj);
3229
3230 // The global object might be created from an object template with accessors.
3231 // Fill these accessors into the dictionary.
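  // Each accessor value is wrapped in a JSGlobalPropertyCell below, the form
  // in which global object properties are stored.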
3232 DescriptorArray* descs = map->instance_descriptors();
3233 for (int i = 0; i < descs->number_of_descriptors(); i++) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003234 PropertyDetails details(descs->GetDetails(i));
Steve Blocka7e24c12009-10-30 11:49:00 +00003235 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3236 PropertyDetails d =
3237 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3238 Object* value = descs->GetCallbacksObject(i);
Steve Block44f0eee2011-05-26 01:26:41 +01003239 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
John Reck59135872010-11-02 12:39:01 -07003240 if (!maybe_value->ToObject(&value)) return maybe_value;
3241 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003242
John Reck59135872010-11-02 12:39:01 -07003243 Object* result;
3244 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3245 if (!maybe_result->ToObject(&result)) return maybe_result;
3246 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003247 dictionary = StringDictionary::cast(result);
3248 }
3249
3250 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003251 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3252 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3253 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003254 JSObject* global = JSObject::cast(obj);
3255 InitializeJSObjectFromMap(global, dictionary, map);
3256
3257 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003258 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3259 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3260 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003261 Map* new_map = Map::cast(obj);
3262
3263  // Set up the global object as a normalized object.
3264 global->set_map(new_map);
Steve Block44f0eee2011-05-26 01:26:41 +01003265 global->map()->set_instance_descriptors(empty_descriptor_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00003266 global->set_properties(dictionary);
3267
3268 // Make sure result is a global object with properties in dictionary.
3269 ASSERT(global->IsGlobalObject());
3270 ASSERT(!global->HasFastProperties());
3271 return global;
3272}
3273
3274
John Reck59135872010-11-02 12:39:01 -07003275MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003276 // Never used to copy functions. If functions need to be copied we
3277 // have to be careful to clear the literals array.
3278 ASSERT(!source->IsJSFunction());
3279
3280 // Make the clone.
3281 Map* map = source->map();
3282 int object_size = map->instance_size();
3283 Object* clone;
3284
3285 // If we're forced to always allocate, we use the general allocation
3286 // functions which may leave us with an object in old space.
3287 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003288 { MaybeObject* maybe_clone =
3289 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3290 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3291 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003292 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003293 CopyBlock(clone_address,
3294 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003295 object_size);
3296 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003297 RecordWrites(clone_address,
3298 JSObject::kHeaderSize,
3299 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003300 } else {
John Reck59135872010-11-02 12:39:01 -07003301 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3302 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3303 }
Steve Block44f0eee2011-05-26 01:26:41 +01003304 ASSERT(InNewSpace(clone));
Steve Blocka7e24c12009-10-30 11:49:00 +00003305 // Since we know the clone is allocated in new space, we can copy
3306 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003307 CopyBlock(HeapObject::cast(clone)->address(),
3308 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003309 object_size);
3310 }
3311
3312 FixedArray* elements = FixedArray::cast(source->elements());
3313 FixedArray* properties = FixedArray::cast(source->properties());
3314 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003315 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003316 Object* elem;
3317 { MaybeObject* maybe_elem =
3318 (elements->map() == fixed_cow_array_map()) ?
3319 elements : CopyFixedArray(elements);
3320 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3321 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003322 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3323 }
3324 // Update properties if necessary.
3325 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003326 Object* prop;
3327 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3328 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3329 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003330 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3331 }
3332 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003333#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01003334 isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
Steve Block3ce2e202009-11-05 08:53:23 +00003335#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003336 return clone;
3337}
3338
3339
John Reck59135872010-11-02 12:39:01 -07003340MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3341 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003342 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003343 Map* map = constructor->initial_map();
3344
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003345 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003346 // objects allocated using the constructor.
3347 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003348 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003349
3350 // Allocate the backing storage for the properties.
3351 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003352 Object* properties;
3353 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3354 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3355 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003356
3357 // Reset the map for the object.
3358 object->set_map(constructor->initial_map());
3359
3360 // Reinitialize the object from the constructor map.
3361 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3362 return object;
3363}
3364
3365
John Reck59135872010-11-02 12:39:01 -07003366MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3367 PretenureFlag pretenure) {
3368 Object* result;
3369 { MaybeObject* maybe_result =
3370 AllocateRawAsciiString(string.length(), pretenure);
3371 if (!maybe_result->ToObject(&result)) return maybe_result;
3372 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003373
3374 // Copy the characters into the new object.
3375 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3376 for (int i = 0; i < string.length(); i++) {
3377 string_result->SeqAsciiStringSet(i, string[i]);
3378 }
3379 return result;
3380}
3381
3382
Steve Block9fac8402011-05-12 15:51:54 +01003383MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3384 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003385 // V8 only supports characters in the Basic Multilingual Plane.
3386 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003387  // Count the number of characters in the UTF-8 string. This slow path
3388  // makes no attempt to detect ASCII-only data.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003389 Access<UnicodeCache::Utf8Decoder>
3390 decoder(isolate_->unicode_cache()->utf8_decoder());
Steve Blocka7e24c12009-10-30 11:49:00 +00003391 decoder->Reset(string.start(), string.length());
3392 int chars = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003393 while (decoder->has_more()) {
Steve Block9fac8402011-05-12 15:51:54 +01003394 decoder->GetNext();
Steve Blocka7e24c12009-10-30 11:49:00 +00003395 chars++;
3396 }
3397
John Reck59135872010-11-02 12:39:01 -07003398 Object* result;
3399 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3400 if (!maybe_result->ToObject(&result)) return maybe_result;
3401 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003402
3403 // Convert and copy the characters into the new object.
3404 String* string_result = String::cast(result);
3405 decoder->Reset(string.start(), string.length());
3406 for (int i = 0; i < chars; i++) {
3407 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003408 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003409 string_result->Set(i, r);
3410 }
3411 return result;
3412}
3413
3414
John Reck59135872010-11-02 12:39:01 -07003415MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3416 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003417 // Check if the string is an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003418 MaybeObject* maybe_result;
Steve Block9fac8402011-05-12 15:51:54 +01003419 if (String::IsAscii(string.start(), string.length())) {
John Reck59135872010-11-02 12:39:01 -07003420 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003421 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003422 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003423 }
John Reck59135872010-11-02 12:39:01 -07003424 Object* result;
3425 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003426
3427 // Copy the characters into the new object, which may be either ASCII or
3428 // UTF-16.
3429 String* string_result = String::cast(result);
3430 for (int i = 0; i < string.length(); i++) {
3431 string_result->Set(i, string[i]);
3432 }
3433 return result;
3434}
3435
3436
3437Map* Heap::SymbolMapForString(String* string) {
3438 // If the string is in new space it cannot be used as a symbol.
3439 if (InNewSpace(string)) return NULL;
3440
3441 // Find the corresponding symbol map for strings.
3442 Map* map = string->map();
Steve Block44f0eee2011-05-26 01:26:41 +01003443 if (map == ascii_string_map()) {
3444 return ascii_symbol_map();
3445 }
3446 if (map == string_map()) {
3447 return symbol_map();
3448 }
3449 if (map == cons_string_map()) {
3450 return cons_symbol_map();
3451 }
3452 if (map == cons_ascii_string_map()) {
3453 return cons_ascii_symbol_map();
3454 }
3455 if (map == external_string_map()) {
3456 return external_symbol_map();
3457 }
3458 if (map == external_ascii_string_map()) {
3459 return external_ascii_symbol_map();
3460 }
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003461 if (map == external_string_with_ascii_data_map()) {
3462 return external_symbol_with_ascii_data_map();
3463 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003464
3465 // No match found.
3466 return NULL;
3467}
3468
3469
John Reck59135872010-11-02 12:39:01 -07003470MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3471 int chars,
3472 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003473 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003474  // Ensure that chars matches the number of characters in the buffer.
3475 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3476 // Determine whether the string is ascii.
3477 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003478 while (buffer->has_more()) {
3479 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3480 is_ascii = false;
3481 break;
3482 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003483 }
3484 buffer->Rewind();
3485
3486 // Compute map and object size.
3487 int size;
3488 Map* map;
3489
3490 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003491 if (chars > SeqAsciiString::kMaxLength) {
3492 return Failure::OutOfMemoryException();
3493 }
Steve Blockd0582a62009-12-15 09:54:21 +00003494 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003495 size = SeqAsciiString::SizeFor(chars);
3496 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003497 if (chars > SeqTwoByteString::kMaxLength) {
3498 return Failure::OutOfMemoryException();
3499 }
Steve Blockd0582a62009-12-15 09:54:21 +00003500 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003501 size = SeqTwoByteString::SizeFor(chars);
3502 }
3503
3504 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003505 Object* result;
3506 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3507 ? lo_space_->AllocateRaw(size)
3508 : old_data_space_->AllocateRaw(size);
3509 if (!maybe_result->ToObject(&result)) return maybe_result;
3510 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003511
3512 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003513 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003514 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003515 answer->set_length(chars);
3516 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003517
3518 ASSERT_EQ(size, answer->Size());
3519
3520 // Fill in the characters.
3521 for (int i = 0; i < chars; i++) {
3522 answer->Set(i, buffer->GetNext());
3523 }
3524 return answer;
3525}
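
// Rough sizing note (descriptive only, assumed layout): for a symbol of n
// characters the object size computed above is
//
//   SeqAsciiString::SizeFor(n)   ~ header + n bytes   (rounded up)
//   SeqTwoByteString::SizeFor(n) ~ header + 2*n bytes (rounded up)
//
// so the is_ascii scan over the CharacterStream roughly halves the character
// storage whenever every code unit fits in one byte
// (<= unibrow::Utf8::kMaxOneByteChar).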
3526
3527
John Reck59135872010-11-02 12:39:01 -07003528MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003529 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3530 return Failure::OutOfMemoryException();
3531 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003532
3533 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003534 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003535
Leon Clarkee46be812010-01-19 14:06:41 +00003536 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3537 AllocationSpace retry_space = OLD_DATA_SPACE;
3538
Steve Blocka7e24c12009-10-30 11:49:00 +00003539 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003540 if (size > kMaxObjectSizeInNewSpace) {
3541 // Allocate in large object space; the retry space will be ignored.
3542 space = LO_SPACE;
3543 } else if (size > MaxObjectSizeInPagedSpace()) {
3544 // Allocate in new space, retry in large object space.
3545 retry_space = LO_SPACE;
3546 }
3547 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3548 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003549 }
John Reck59135872010-11-02 12:39:01 -07003550 Object* result;
3551 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3552 if (!maybe_result->ToObject(&result)) return maybe_result;
3553 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003554
Steve Blocka7e24c12009-10-30 11:49:00 +00003555 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003556 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003557 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003558 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003559 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3560 return result;
3561}
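
// Summary of the space-selection policy above (informational only):
//
//   TENURED                                   -> OLD_DATA_SPACE
//                                                (LO_SPACE if > paged max)
//   NOT_TENURED, small                        -> NEW_SPACE, retry OLD_DATA_SPACE
//   NOT_TENURED, > paged max                  -> NEW_SPACE, retry LO_SPACE
//   NOT_TENURED, > kMaxObjectSizeInNewSpace   -> LO_SPACE directly
//
// AllocateRawTwoByteString below applies the identical policy with two-byte
// character sizes.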
3562
3563
John Reck59135872010-11-02 12:39:01 -07003564MaybeObject* Heap::AllocateRawTwoByteString(int length,
3565 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003566 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3567 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003568 }
Leon Clarkee46be812010-01-19 14:06:41 +00003569 int size = SeqTwoByteString::SizeFor(length);
3570 ASSERT(size <= SeqTwoByteString::kMaxSize);
3571 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3572 AllocationSpace retry_space = OLD_DATA_SPACE;
3573
3574 if (space == NEW_SPACE) {
3575 if (size > kMaxObjectSizeInNewSpace) {
3576 // Allocate in large object space; the retry space will be ignored.
3577 space = LO_SPACE;
3578 } else if (size > MaxObjectSizeInPagedSpace()) {
3579 // Allocate in new space, retry in large object space.
3580 retry_space = LO_SPACE;
3581 }
3582 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3583 space = LO_SPACE;
3584 }
John Reck59135872010-11-02 12:39:01 -07003585 Object* result;
3586 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3587 if (!maybe_result->ToObject(&result)) return maybe_result;
3588 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003589
Steve Blocka7e24c12009-10-30 11:49:00 +00003590 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003591 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003592 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003593 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003594 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3595 return result;
3596}
3597
3598
John Reck59135872010-11-02 12:39:01 -07003599MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003600 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003601 Object* result;
3602 { MaybeObject* maybe_result =
3603 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3604 if (!maybe_result->ToObject(&result)) return maybe_result;
3605 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003606 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003607 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3608 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003609 return result;
3610}
3611
3612
John Reck59135872010-11-02 12:39:01 -07003613MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003614 if (length < 0 || length > FixedArray::kMaxLength) {
3615 return Failure::OutOfMemoryException();
3616 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003617 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003618 // Use the general function if we're forced to always allocate.
3619 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3620 // Allocate the raw data for a fixed array.
3621 int size = FixedArray::SizeFor(length);
3622 return size <= kMaxObjectSizeInNewSpace
3623 ? new_space_.AllocateRaw(size)
3624 : lo_space_->AllocateRawFixedArray(size);
3625}
3626
3627
John Reck59135872010-11-02 12:39:01 -07003628MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003629 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003630 Object* obj;
3631 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3632 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3633 }
Steve Block44f0eee2011-05-26 01:26:41 +01003634 if (InNewSpace(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003635 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003636 dst->set_map(map);
3637 CopyBlock(dst->address() + kPointerSize,
3638 src->address() + kPointerSize,
3639 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003640 return obj;
3641 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003642 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003643 FixedArray* result = FixedArray::cast(obj);
3644 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003645
Steve Blocka7e24c12009-10-30 11:49:00 +00003646 // Copy the contents.
Leon Clarke4515c472010-02-03 11:58:03 +00003647 AssertNoAllocation no_gc;
3648 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003649 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3650 return result;
3651}
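
// Hedged sketch (assumption, based on how this helper is typically used): a
// map-preserving copy is just
//
//   MaybeObject* Heap::CopyFixedArray(FixedArray* src) {
//     return CopyFixedArrayWithMap(src, src->map());
//   }
//
// The new-space fast path above may copy the body wholesale with CopyBlock
// because region dirty marks only need to track old-to-new pointers; copies
// landing outside new space go through set() with an explicit write barrier
// mode instead.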
3652
3653
John Reck59135872010-11-02 12:39:01 -07003654MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003655 ASSERT(length >= 0);
3656 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003657 Object* result;
3658 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3659 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003660 }
John Reck59135872010-11-02 12:39:01 -07003661 // Initialize header.
3662 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3663 array->set_map(fixed_array_map());
3664 array->set_length(length);
3665 // Initialize body.
Steve Block44f0eee2011-05-26 01:26:41 +01003666 ASSERT(!InNewSpace(undefined_value()));
John Reck59135872010-11-02 12:39:01 -07003667 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003668 return result;
3669}
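
// Hedged note: MemsetPointer can fill the body without write barriers only
// because the filler (undefined_value()) is guaranteed to live outside new
// space -- that is what the ASSERT above checks. A sketch of the helper
// under that assumption:
//
//   inline void MemsetPointer(Object** dest, Object* value, int counter) {
//     for (int i = 0; i < counter; i++) dest[i] = value;
//   }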
3670
3671
John Reck59135872010-11-02 12:39:01 -07003672MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003673 if (length < 0 || length > FixedArray::kMaxLength) {
3674 return Failure::OutOfMemoryException();
3675 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003676
Leon Clarkee46be812010-01-19 14:06:41 +00003677 AllocationSpace space =
3678 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003679 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003680 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3681 // Too big for new space.
3682 space = LO_SPACE;
3683 } else if (space == OLD_POINTER_SPACE &&
3684 size > MaxObjectSizeInPagedSpace()) {
3685 // Too big for old pointer space.
3686 space = LO_SPACE;
3687 }
3688
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003689 AllocationSpace retry_space =
3690 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3691
3692 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003693}
3694
3695
John Reck59135872010-11-02 12:39:01 -07003696MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
Steve Block44f0eee2011-05-26 01:26:41 +01003697 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07003698 int length,
3699 PretenureFlag pretenure,
3700 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003701 ASSERT(length >= 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003702 ASSERT(heap->empty_fixed_array()->IsFixedArray());
3703 if (length == 0) return heap->empty_fixed_array();
Steve Block6ded16b2010-05-10 14:33:55 +01003704
Steve Block44f0eee2011-05-26 01:26:41 +01003705 ASSERT(!heap->InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003706 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003707 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003708 if (!maybe_result->ToObject(&result)) return maybe_result;
3709 }
Steve Block6ded16b2010-05-10 14:33:55 +01003710
Steve Block44f0eee2011-05-26 01:26:41 +01003711 HeapObject::cast(result)->set_map(heap->fixed_array_map());
Steve Block6ded16b2010-05-10 14:33:55 +01003712 FixedArray* array = FixedArray::cast(result);
3713 array->set_length(length);
3714 MemsetPointer(array->data_start(), filler, length);
3715 return array;
3716}
3717
3718
John Reck59135872010-11-02 12:39:01 -07003719MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003720 return AllocateFixedArrayWithFiller(this,
3721 length,
3722 pretenure,
3723 undefined_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003724}
3725
3726
John Reck59135872010-11-02 12:39:01 -07003727MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3728 PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003729 return AllocateFixedArrayWithFiller(this,
3730 length,
3731 pretenure,
3732 the_hole_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003733}
3734
3735
John Reck59135872010-11-02 12:39:01 -07003736MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003737 if (length == 0) return empty_fixed_array();
3738
John Reck59135872010-11-02 12:39:01 -07003739 Object* obj;
3740 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3741 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3742 }
Steve Block6ded16b2010-05-10 14:33:55 +01003743
3744 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3745 FixedArray::cast(obj)->set_length(length);
3746 return obj;
3747}
3748
3749
John Reck59135872010-11-02 12:39:01 -07003750MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3751 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003752 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003753 if (!maybe_result->ToObject(&result)) return maybe_result;
3754 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003755 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003756 ASSERT(result->IsHashTable());
3757 return result;
3758}
3759
3760
John Reck59135872010-11-02 12:39:01 -07003761MaybeObject* Heap::AllocateGlobalContext() {
3762 Object* result;
3763 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01003764 AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003765 if (!maybe_result->ToObject(&result)) return maybe_result;
3766 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003767 Context* context = reinterpret_cast<Context*>(result);
3768 context->set_map(global_context_map());
3769 ASSERT(context->IsGlobalContext());
3770 ASSERT(result->IsContext());
3771 return result;
3772}
3773
3774
John Reck59135872010-11-02 12:39:01 -07003775MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003776 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003777 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003778 { MaybeObject* maybe_result = AllocateFixedArray(length);
John Reck59135872010-11-02 12:39:01 -07003779 if (!maybe_result->ToObject(&result)) return maybe_result;
3780 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003781 Context* context = reinterpret_cast<Context*>(result);
3782 context->set_map(context_map());
3783 context->set_closure(function);
3784 context->set_fcontext(context);
3785 context->set_previous(NULL);
3786 context->set_extension(NULL);
3787 context->set_global(function->context()->global());
3788 ASSERT(!context->IsGlobalContext());
3789 ASSERT(context->is_function_context());
3790 ASSERT(result->IsContext());
3791 return result;
3792}
3793
3794
John Reck59135872010-11-02 12:39:01 -07003795MaybeObject* Heap::AllocateWithContext(Context* previous,
3796 JSObject* extension,
3797 bool is_catch_context) {
3798 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003799 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003800 if (!maybe_result->ToObject(&result)) return maybe_result;
3801 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003802 Context* context = reinterpret_cast<Context*>(result);
Steve Block44f0eee2011-05-26 01:26:41 +01003803 context->set_map(is_catch_context ? catch_context_map() :
3804 context_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003805 context->set_closure(previous->closure());
3806 context->set_fcontext(previous->fcontext());
3807 context->set_previous(previous);
3808 context->set_extension(extension);
3809 context->set_global(previous->global());
3810 ASSERT(!context->IsGlobalContext());
3811 ASSERT(!context->is_function_context());
3812 ASSERT(result->IsContext());
3813 return result;
3814}
3815
3816
John Reck59135872010-11-02 12:39:01 -07003817MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003818 Map* map;
3819 switch (type) {
Steve Block44f0eee2011-05-26 01:26:41 +01003820#define MAKE_CASE(NAME, Name, name) \
3821 case NAME##_TYPE: map = name##_map(); break;
Steve Blocka7e24c12009-10-30 11:49:00 +00003822STRUCT_LIST(MAKE_CASE)
3823#undef MAKE_CASE
3824 default:
3825 UNREACHABLE();
3826 return Failure::InternalError();
3827 }
3828 int size = map->instance_size();
3829 AllocationSpace space =
3830 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003831 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003832 { MaybeObject* maybe_result = Allocate(map, space);
John Reck59135872010-11-02 12:39:01 -07003833 if (!maybe_result->ToObject(&result)) return maybe_result;
3834 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003835 Struct::cast(result)->InitializeBody(size);
3836 return result;
3837}
3838
3839
3840bool Heap::IdleNotification() {
3841 static const int kIdlesBeforeScavenge = 4;
3842 static const int kIdlesBeforeMarkSweep = 7;
3843 static const int kIdlesBeforeMarkCompact = 8;
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003844 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
Ben Murdoche0cee9b2011-05-25 10:26:03 +01003845 static const unsigned int kGCsBetweenCleanup = 4;
Steve Block44f0eee2011-05-26 01:26:41 +01003846
3847 if (!last_idle_notification_gc_count_init_) {
3848 last_idle_notification_gc_count_ = gc_count_;
3849 last_idle_notification_gc_count_init_ = true;
3850 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003851
Steve Block6ded16b2010-05-10 14:33:55 +01003852 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003853 bool finished = false;
3854
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003855 // Reset the number of idle notifications received when a number of
3856 // GCs have taken place. This allows another round of cleanup based
3857 // on idle notifications if enough work has been carried out to
3858 // provoke a number of garbage collections.
Steve Block44f0eee2011-05-26 01:26:41 +01003859 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
3860 number_idle_notifications_ =
3861 Min(number_idle_notifications_ + 1, kMaxIdleCount);
Steve Blocka7e24c12009-10-30 11:49:00 +00003862 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003863 number_idle_notifications_ = 0;
3864 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003865 }
3866
Steve Block44f0eee2011-05-26 01:26:41 +01003867 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003868 if (contexts_disposed_ > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01003869 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003870 CollectAllGarbage(false);
3871 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003872 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003873 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003874 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003875 last_idle_notification_gc_count_ = gc_count_;
3876 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003877 // Before doing the mark-sweep collections we clear the
3878 // compilation cache to avoid hanging on to source code and
3879 // generated code for cached functions.
Steve Block44f0eee2011-05-26 01:26:41 +01003880 isolate_->compilation_cache()->Clear();
Steve Blockd0582a62009-12-15 09:54:21 +00003881
Steve Blocka7e24c12009-10-30 11:49:00 +00003882 CollectAllGarbage(false);
3883 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003884 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003885
Steve Block44f0eee2011-05-26 01:26:41 +01003886 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003887 CollectAllGarbage(true);
3888 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003889 last_idle_notification_gc_count_ = gc_count_;
3890 number_idle_notifications_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003891 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003892 } else if (contexts_disposed_ > 0) {
3893 if (FLAG_expose_gc) {
3894 contexts_disposed_ = 0;
3895 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003896 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003897 CollectAllGarbage(false);
Steve Block44f0eee2011-05-26 01:26:41 +01003898 last_idle_notification_gc_count_ = gc_count_;
Steve Block6ded16b2010-05-10 14:33:55 +01003899 }
3900 // If this is the first idle notification, we reset the
3901 // notification count to avoid letting idle notifications for
3902 // context disposal garbage collections start an overly
3903 // aggressive idle GC cycle.
Steve Block44f0eee2011-05-26 01:26:41 +01003904 if (number_idle_notifications_ <= 1) {
3905 number_idle_notifications_ = 0;
Steve Block6ded16b2010-05-10 14:33:55 +01003906 uncommit = false;
3907 }
Steve Block44f0eee2011-05-26 01:26:41 +01003908 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003909 // If we have received more than kIdlesBeforeMarkCompact idle
3910 // notifications we do not perform any cleanup because we don't
3911 // expect to gain much by doing so.
3912 finished = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003913 }
3914
Steve Block6ded16b2010-05-10 14:33:55 +01003915 // Make sure that we have no pending context disposals and
3916 // conditionally uncommit from space.
3917 ASSERT(contexts_disposed_ == 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003918 if (uncommit) UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003919 return finished;
3920}
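
// Informal summary of the idle-notification heuristic above (descriptive
// only):
//
//   idle #4 (kIdlesBeforeScavenge)    -> scavenge (full GC if contexts were
//                                        disposed), shrink new space
//   idle #7 (kIdlesBeforeMarkSweep)   -> clear compilation cache + full GC
//   idle #8 (kIdlesBeforeMarkCompact) -> compacting full GC, report finished
//   beyond #8                         -> do nothing until the counter is
//                                        reset by >= kGCsBetweenCleanup
//                                        intervening GCs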
3921
3922
3923#ifdef DEBUG
3924
3925void Heap::Print() {
3926 if (!HasBeenSetup()) return;
Steve Block44f0eee2011-05-26 01:26:41 +01003927 isolate()->PrintStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00003928 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003929 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3930 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003931}
3932
3933
3934void Heap::ReportCodeStatistics(const char* title) {
3935 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3936 PagedSpace::ResetCodeStatistics();
3937 // We do not look for code in new space, map space, or old space. If code
3938 // somehow ends up in those spaces, we would miss it here.
3939 code_space_->CollectCodeStatistics();
3940 lo_space_->CollectCodeStatistics();
3941 PagedSpace::ReportCodeStatistics();
3942}
3943
3944
3945// This function expects that NewSpace's allocated objects histogram is
3946// populated (via a call to CollectStatistics or else as a side effect of a
3947// just-completed scavenge collection).
3948void Heap::ReportHeapStatistics(const char* title) {
3949 USE(title);
3950 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3951 title, gc_count_);
3952 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003953 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3954 old_gen_promotion_limit_);
3955 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3956 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003957
3958 PrintF("\n");
3959 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
Steve Block44f0eee2011-05-26 01:26:41 +01003960 isolate_->global_handles()->PrintStats();
Steve Blocka7e24c12009-10-30 11:49:00 +00003961 PrintF("\n");
3962
3963 PrintF("Heap statistics : ");
Steve Block44f0eee2011-05-26 01:26:41 +01003964 isolate_->memory_allocator()->ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00003965 PrintF("To space : ");
3966 new_space_.ReportStatistics();
3967 PrintF("Old pointer space : ");
3968 old_pointer_space_->ReportStatistics();
3969 PrintF("Old data space : ");
3970 old_data_space_->ReportStatistics();
3971 PrintF("Code space : ");
3972 code_space_->ReportStatistics();
3973 PrintF("Map space : ");
3974 map_space_->ReportStatistics();
3975 PrintF("Cell space : ");
3976 cell_space_->ReportStatistics();
3977 PrintF("Large object space : ");
3978 lo_space_->ReportStatistics();
3979 PrintF(">>>>>> ========================================= >>>>>>\n");
3980}
3981
3982#endif // DEBUG
3983
3984bool Heap::Contains(HeapObject* value) {
3985 return Contains(value->address());
3986}
3987
3988
3989bool Heap::Contains(Address addr) {
3990 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3991 return HasBeenSetup() &&
3992 (new_space_.ToSpaceContains(addr) ||
3993 old_pointer_space_->Contains(addr) ||
3994 old_data_space_->Contains(addr) ||
3995 code_space_->Contains(addr) ||
3996 map_space_->Contains(addr) ||
3997 cell_space_->Contains(addr) ||
3998 lo_space_->SlowContains(addr));
3999}
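
// Hedged usage sketch (illustrative only): Contains() answers "is this
// address anywhere in the V8 heap", while InSpace() below narrows the
// question to a single space, e.g.
//
//   ASSERT(heap->Contains(obj));
//   if (heap->InSpace(obj, NEW_SPACE)) {
//     // obj may still move or die at the next scavenge
//   }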
4000
4001
4002bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4003 return InSpace(value->address(), space);
4004}
4005
4006
4007bool Heap::InSpace(Address addr, AllocationSpace space) {
4008 if (OS::IsOutsideAllocatedSpace(addr)) return false;
4009 if (!HasBeenSetup()) return false;
4010
4011 switch (space) {
4012 case NEW_SPACE:
4013 return new_space_.ToSpaceContains(addr);
4014 case OLD_POINTER_SPACE:
4015 return old_pointer_space_->Contains(addr);
4016 case OLD_DATA_SPACE:
4017 return old_data_space_->Contains(addr);
4018 case CODE_SPACE:
4019 return code_space_->Contains(addr);
4020 case MAP_SPACE:
4021 return map_space_->Contains(addr);
4022 case CELL_SPACE:
4023 return cell_space_->Contains(addr);
4024 case LO_SPACE:
4025 return lo_space_->SlowContains(addr);
4026 }
4027
4028 return false;
4029}
4030
4031
4032#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004033static void DummyScavengePointer(HeapObject** p) {
4034}
4035
4036
4037static void VerifyPointersUnderWatermark(
4038 PagedSpace* space,
4039 DirtyRegionCallback visit_dirty_region) {
4040 PageIterator it(space, PageIterator::PAGES_IN_USE);
4041
4042 while (it.has_next()) {
4043 Page* page = it.next();
4044 Address start = page->ObjectAreaStart();
4045 Address end = page->AllocationWatermark();
4046
Steve Block44f0eee2011-05-26 01:26:41 +01004047 HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004048 start,
4049 end,
4050 visit_dirty_region,
4051 &DummyScavengePointer);
4052 }
4053}
4054
4055
4056static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
4057 LargeObjectIterator it(space);
4058 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
4059 if (object->IsFixedArray()) {
4060 Address slot_address = object->address();
4061 Address end = object->address() + object->Size();
4062
4063 while (slot_address < end) {
4064 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
4065 // When we are not in a GC, the Heap::InNewSpace() predicate
4066 // checks that pointers which satisfy it point into
4067 // the active semispace.
Steve Block44f0eee2011-05-26 01:26:41 +01004068 HEAP->InNewSpace(*slot);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004069 slot_address += kPointerSize;
4070 }
4071 }
4072 }
4073}
4074
4075
Steve Blocka7e24c12009-10-30 11:49:00 +00004076void Heap::Verify() {
4077 ASSERT(HasBeenSetup());
4078
4079 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00004080 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00004081
4082 new_space_.Verify();
4083
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004084 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
4085 old_pointer_space_->Verify(&dirty_regions_visitor);
4086 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004087
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004088 VerifyPointersUnderWatermark(old_pointer_space_,
4089 &IteratePointersInDirtyRegion);
4090 VerifyPointersUnderWatermark(map_space_,
4091 &IteratePointersInDirtyMapsRegion);
4092 VerifyPointersUnderWatermark(lo_space_);
4093
4094 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4095 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4096
4097 VerifyPointersVisitor no_dirty_regions_visitor;
4098 old_data_space_->Verify(&no_dirty_regions_visitor);
4099 code_space_->Verify(&no_dirty_regions_visitor);
4100 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004101
4102 lo_space_->Verify();
4103}
4104#endif // DEBUG
4105
4106
John Reck59135872010-11-02 12:39:01 -07004107MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004108 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004109 Object* new_table;
4110 { MaybeObject* maybe_new_table =
4111 symbol_table()->LookupSymbol(string, &symbol);
4112 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4113 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004114 // Can't use set_symbol_table because SymbolTable::cast knows that
4115 // SymbolTable is a singleton and checks for identity.
4116 roots_[kSymbolTableRootIndex] = new_table;
4117 ASSERT(symbol != NULL);
4118 return symbol;
4119}
4120
4121
Steve Block9fac8402011-05-12 15:51:54 +01004122MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4123 Object* symbol = NULL;
4124 Object* new_table;
4125 { MaybeObject* maybe_new_table =
4126 symbol_table()->LookupAsciiSymbol(string, &symbol);
4127 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4128 }
4129 // Can't use set_symbol_table because SymbolTable::cast knows that
4130 // SymbolTable is a singleton and checks for identity.
4131 roots_[kSymbolTableRootIndex] = new_table;
4132 ASSERT(symbol != NULL);
4133 return symbol;
4134}
4135
4136
4137MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4138 Object* symbol = NULL;
4139 Object* new_table;
4140 { MaybeObject* maybe_new_table =
4141 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4142 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4143 }
4144 // Can't use set_symbol_table because SymbolTable::cast knows that
4145 // SymbolTable is a singleton and checks for identity.
4146 roots_[kSymbolTableRootIndex] = new_table;
4147 ASSERT(symbol != NULL);
4148 return symbol;
4149}
4150
4151
John Reck59135872010-11-02 12:39:01 -07004152MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004153 if (string->IsSymbol()) return string;
4154 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004155 Object* new_table;
4156 { MaybeObject* maybe_new_table =
4157 symbol_table()->LookupString(string, &symbol);
4158 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4159 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004160 // Can't use set_symbol_table because SymbolTable::cast knows that
4161 // SymbolTable is a singleton and checks for identity.
4162 roots_[kSymbolTableRootIndex] = new_table;
4163 ASSERT(symbol != NULL);
4164 return symbol;
4165}
4166
4167
4168bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4169 if (string->IsSymbol()) {
4170 *symbol = string;
4171 return true;
4172 }
4173 return symbol_table()->LookupSymbolIfExists(string, symbol);
4174}
4175
4176
4177#ifdef DEBUG
4178void Heap::ZapFromSpace() {
Steve Block1e0659c2011-05-24 12:43:12 +01004179 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
Steve Blocka7e24c12009-10-30 11:49:00 +00004180 for (Address a = new_space_.FromSpaceLow();
4181 a < new_space_.FromSpaceHigh();
4182 a += kPointerSize) {
4183 Memory::Address_at(a) = kFromSpaceZapValue;
4184 }
4185}
4186#endif // DEBUG
4187
4188
Steve Block44f0eee2011-05-26 01:26:41 +01004189bool Heap::IteratePointersInDirtyRegion(Heap* heap,
4190 Address start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004191 Address end,
4192 ObjectSlotCallback copy_object_func) {
4193 Address slot_address = start;
4194 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004195
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004196 while (slot_address < end) {
4197 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004198 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004199 ASSERT((*slot)->IsHeapObject());
4200 copy_object_func(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004201 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004202 ASSERT((*slot)->IsHeapObject());
4203 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004204 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004205 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004206 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004207 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004208 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004209}
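
// Hedged note (descriptive only): the boolean result feeds the per-page
// region marks. After copy_object_func (the scavenge callback) has updated
// a slot, the slot keeps its region dirty only if it still points into new
// space, i.e. the target survived in to-space rather than being promoted;
// regions whose slots all point to old space can be marked clean and
// skipped on the next scavenge.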
4210
4211
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004212// Compute start address of the first map following given addr.
4213static inline Address MapStartAlign(Address addr) {
4214 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4215 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4216}
Steve Blocka7e24c12009-10-30 11:49:00 +00004217
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004218
4219// Compute end address of the first map preceding given addr.
4220static inline Address MapEndAlign(Address addr) {
4221 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4222 return page + ((addr - page) / Map::kSize * Map::kSize);
4223}
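
// Hedged worked example (made-up numbers): if Map::kSize were 0x88 and a
// page's object area started at 0x1000, then for addr == 0x10a0
//
//   MapStartAlign(addr) == 0x1000 + ((0xa0 + 0x87) / 0x88) * 0x88 == 0x1110
//   MapEndAlign(addr)   == 0x1000 + (0xa0 / 0x88) * 0x88          == 0x1088
//
// i.e. the first map boundary at or after addr and the last boundary at or
// before it. This arithmetic is valid only because map space stores nothing
// but maps, densely packed at a fixed stride.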
4224
4225
4226static bool IteratePointersInDirtyMaps(Address start,
4227 Address end,
4228 ObjectSlotCallback copy_object_func) {
4229 ASSERT(MapStartAlign(start) == start);
4230 ASSERT(MapEndAlign(end) == end);
4231
4232 Address map_address = start;
4233 bool pointers_to_new_space_found = false;
4234
Steve Block44f0eee2011-05-26 01:26:41 +01004235 Heap* heap = HEAP;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004236 while (map_address < end) {
Steve Block44f0eee2011-05-26 01:26:41 +01004237 ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004238 ASSERT(Memory::Object_at(map_address)->IsMap());
4239
4240 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4241 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4242
Steve Block44f0eee2011-05-26 01:26:41 +01004243 if (Heap::IteratePointersInDirtyRegion(heap,
4244 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004245 pointer_fields_end,
4246 copy_object_func)) {
4247 pointers_to_new_space_found = true;
4248 }
4249
4250 map_address += Map::kSize;
4251 }
4252
4253 return pointers_to_new_space_found;
4254}
4255
4256
4257bool Heap::IteratePointersInDirtyMapsRegion(
Steve Block44f0eee2011-05-26 01:26:41 +01004258 Heap* heap,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004259 Address start,
4260 Address end,
4261 ObjectSlotCallback copy_object_func) {
4262 Address map_aligned_start = MapStartAlign(start);
4263 Address map_aligned_end = MapEndAlign(end);
4264
4265 bool contains_pointers_to_new_space = false;
4266
4267 if (map_aligned_start != start) {
4268 Address prev_map = map_aligned_start - Map::kSize;
4269 ASSERT(Memory::Object_at(prev_map)->IsMap());
4270
4271 Address pointer_fields_start =
4272 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4273
4274 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004275 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004276
4277 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004278 IteratePointersInDirtyRegion(heap,
4279 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004280 pointer_fields_end,
4281 copy_object_func)
4282 || contains_pointers_to_new_space;
4283 }
4284
4285 contains_pointers_to_new_space =
4286 IteratePointersInDirtyMaps(map_aligned_start,
4287 map_aligned_end,
4288 copy_object_func)
4289 || contains_pointers_to_new_space;
4290
4291 if (map_aligned_end != end) {
4292 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4293
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004294 Address pointer_fields_start =
4295 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004296
4297 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004298 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004299
4300 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004301 IteratePointersInDirtyRegion(heap,
4302 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004303 pointer_fields_end,
4304 copy_object_func)
4305 || contains_pointers_to_new_space;
4306 }
4307
4308 return contains_pointers_to_new_space;
4309}
4310
4311
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004312void Heap::IterateAndMarkPointersToFromSpace(Address start,
4313 Address end,
4314 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004315 Address slot_address = start;
4316 Page* page = Page::FromAddress(start);
4317
4318 uint32_t marks = page->GetRegionMarks();
4319
4320 while (slot_address < end) {
4321 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004322 if (InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004323 ASSERT((*slot)->IsHeapObject());
4324 callback(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004325 if (InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004326 ASSERT((*slot)->IsHeapObject());
4327 marks |= page->GetRegionMaskForAddress(slot_address);
4328 }
4329 }
4330 slot_address += kPointerSize;
4331 }
4332
4333 page->SetRegionMarks(marks);
4334}
4335
4336
4337uint32_t Heap::IterateDirtyRegions(
4338 uint32_t marks,
4339 Address area_start,
4340 Address area_end,
4341 DirtyRegionCallback visit_dirty_region,
4342 ObjectSlotCallback copy_object_func) {
4343 uint32_t newmarks = 0;
4344 uint32_t mask = 1;
4345
4346 if (area_start >= area_end) {
4347 return newmarks;
4348 }
4349
4350 Address region_start = area_start;
4351
4352 // area_start does not necessarily coincide with the start of the first
4353 // region. Thus, to compute the beginning of the next region, we round
4354 // area_start up to a Page::kRegionSize boundary.
4355 Address second_region =
4356 reinterpret_cast<Address>(
4357 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4358 ~Page::kRegionAlignmentMask);
4359
4360 // Next region might be beyond area_end.
4361 Address region_end = Min(second_region, area_end);
4362
4363 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004364 if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004365 newmarks |= mask;
4366 }
4367 }
4368 mask <<= 1;
4369
4370 // Iterate subsequent regions which lie fully inside [area_start, area_end[.
4371 region_start = region_end;
4372 region_end = region_start + Page::kRegionSize;
4373
4374 while (region_end <= area_end) {
4375 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004376 if (visit_dirty_region(this,
4377 region_start,
4378 region_end,
4379 copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004380 newmarks |= mask;
4381 }
4382 }
4383
4384 region_start = region_end;
4385 region_end = region_start + Page::kRegionSize;
4386
4387 mask <<= 1;
4388 }
4389
4390 if (region_start != area_end) {
4391 // A small piece of the area is left unvisited because area_end does not
4392 // coincide with a region end. Check whether the region covering the last
4393 // part of the area is dirty.
4394 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004395 if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004396 newmarks |= mask;
4397 }
4398 }
4399 }
4400
4401 return newmarks;
4402}
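
// Hedged sketch of the bookkeeping above (informational): each page keeps
// one dirty bit per Page::kRegionSize-sized region in a uint32_t. The loop
// visits only regions whose bit is set and rebuilds the mask from the
// callback results, conceptually:
//
//   for each region R overlapping [area_start, area_end):
//     if ((marks & bit(R)) && visit_dirty_region(heap, R.start, R.end, cb))
//       newmarks |= bit(R);
//
// so a region is "cleaned" simply by not re-setting its bit.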
4403
4404
4405
4406void Heap::IterateDirtyRegions(
4407 PagedSpace* space,
4408 DirtyRegionCallback visit_dirty_region,
4409 ObjectSlotCallback copy_object_func,
4410 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004411
4412 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004413
Steve Blocka7e24c12009-10-30 11:49:00 +00004414 while (it.has_next()) {
4415 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004416 uint32_t marks = page->GetRegionMarks();
4417
4418 if (marks != Page::kAllRegionsCleanMarks) {
4419 Address start = page->ObjectAreaStart();
4420
4421 // Do not try to visit pointers beyond the page's allocation watermark.
4422 // The page can contain garbage pointers there.
4423 Address end;
4424
4425 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4426 page->IsWatermarkValid()) {
4427 end = page->AllocationWatermark();
4428 } else {
4429 end = page->CachedAllocationWatermark();
4430 }
4431
4432 ASSERT(space == old_pointer_space_ ||
4433 (space == map_space_ &&
4434 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4435
4436 page->SetRegionMarks(IterateDirtyRegions(marks,
4437 start,
4438 end,
4439 visit_dirty_region,
4440 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004441 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004442
4443 // Mark page watermark as invalid to maintain watermark validity invariant.
4444 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4445 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004446 }
4447}
4448
4449
Steve Blockd0582a62009-12-15 09:54:21 +00004450void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4451 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004452 IterateWeakRoots(v, mode);
4453}
4454
4455
4456void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004457 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004458 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004459 if (mode != VISIT_ALL_IN_SCAVENGE) {
4460 // Scavenge collections have special processing for this.
Steve Block44f0eee2011-05-26 01:26:41 +01004461 external_string_table_.Iterate(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004462 }
4463 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004464}
4465
4466
Steve Blockd0582a62009-12-15 09:54:21 +00004467void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004468 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004469 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004470
Iain Merrick75681382010-08-19 15:07:18 +01004471 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004472 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004473
Steve Block44f0eee2011-05-26 01:26:41 +01004474 isolate_->bootstrapper()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004475 v->Synchronize("bootstrapper");
Steve Block44f0eee2011-05-26 01:26:41 +01004476 isolate_->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004477 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004478 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004479 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004480
4481#ifdef ENABLE_DEBUGGER_SUPPORT
Steve Block44f0eee2011-05-26 01:26:41 +01004482 isolate_->debug()->Iterate(v);
Steve Blocka7e24c12009-10-30 11:49:00 +00004483#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004484 v->Synchronize("debug");
Steve Block44f0eee2011-05-26 01:26:41 +01004485 isolate_->compilation_cache()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004486 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004487
4488 // Iterate over local handles in handle scopes.
Steve Block44f0eee2011-05-26 01:26:41 +01004489 isolate_->handle_scope_implementer()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004490 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004491
Leon Clarkee46be812010-01-19 14:06:41 +00004492 // Iterate over the builtin code objects and code stubs in the
4493 // heap. Note that it is not necessary to iterate over code objects
4494 // on scavenge collections.
4495 if (mode != VISIT_ALL_IN_SCAVENGE) {
Steve Block44f0eee2011-05-26 01:26:41 +01004496 isolate_->builtins()->IterateBuiltins(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004497 }
Steve Blockd0582a62009-12-15 09:54:21 +00004498 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004499
4500 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004501 if (mode == VISIT_ONLY_STRONG) {
Steve Block44f0eee2011-05-26 01:26:41 +01004502 isolate_->global_handles()->IterateStrongRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004503 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01004504 isolate_->global_handles()->IterateAllRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004505 }
4506 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004507
4508 // Iterate over pointers being held by inactive threads.
Steve Block44f0eee2011-05-26 01:26:41 +01004509 isolate_->thread_manager()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004510 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004511
4512 // Iterate over the pointers the Serialization/Deserialization code is
4513 // holding.
4514 // During garbage collection this keeps the partial snapshot cache alive.
4515 // During deserialization of the startup snapshot this creates the partial
4516 // snapshot cache and deserializes the objects it refers to. During
4517 // serialization this does nothing, since the partial snapshot cache is
4518 // empty. However the next thing we do is create the partial snapshot,
4519 // filling up the partial snapshot cache with objects it needs as we go.
4520 SerializerDeserializer::Iterate(v);
4521 // We don't do a v->Synchronize call here, because in debug mode that will
4522 // output a flag to the snapshot. However at this point the serializer and
4523 // deserializer are deliberately a little unsynchronized (see above) so the
4524 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004525}
Steve Blocka7e24c12009-10-30 11:49:00 +00004526
4527
Steve Blocka7e24c12009-10-30 11:49:00 +00004528// TODO(1236194): Since the heap size is configurable on the command line
4529// and through the API, we should gracefully handle the case that the heap
4530// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004531bool Heap::ConfigureHeap(int max_semispace_size,
4532 int max_old_gen_size,
4533 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004534 if (HasBeenSetup()) return false;
4535
Steve Block3ce2e202009-11-05 08:53:23 +00004536 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4537
4538 if (Snapshot::IsEnabled()) {
4539 // If we are using a snapshot we always reserve the default amount
4540 // of memory for each semispace because code in the snapshot has
4541 // write-barrier code that relies on the size and alignment of new
4542 // space. We therefore cannot use a larger max semispace size
4543 // than the default reserved semispace size.
4544 if (max_semispace_size_ > reserved_semispace_size_) {
4545 max_semispace_size_ = reserved_semispace_size_;
4546 }
4547 } else {
4548 // If we are not using snapshots we reserve space for the actual
4549 // max semispace size.
4550 reserved_semispace_size_ = max_semispace_size_;
4551 }
4552
4553 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004554 if (max_executable_size > 0) {
4555 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4556 }
4557
4558 // The max executable size must be less than or equal to the max old
4559 // generation size.
4560 if (max_executable_size_ > max_old_generation_size_) {
4561 max_executable_size_ = max_old_generation_size_;
4562 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004563
4564 // The new space size must be a power of two to support single-bit testing
4565 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004566 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4567 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4568 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4569 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004570
4571 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004572 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004573
Steve Block44f0eee2011-05-26 01:26:41 +01004574 configured_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004575 return true;
4576}
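
// Hedged usage sketch (the API wrapper shown is an assumption, not taken
// from this file): an embedder typically reaches ConfigureHeap through
// v8::ResourceConstraints, e.g.
//
//   v8::ResourceConstraints constraints;
//   constraints.set_max_young_space_size(2 * MB);
//   constraints.set_max_old_space_size(256 * MB);
//   v8::SetResourceConstraints(&constraints);  // forwards into ConfigureHeap
//
// after which the semispace size is rounded up to a power of two and, when a
// snapshot is in use, clamped to the reserved semispace size.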
4577
4578
4579bool Heap::ConfigureHeapDefault() {
Russell Brenner90bac252010-11-18 13:33:46 -08004580 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4581 FLAG_max_old_space_size * MB,
4582 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004583}
4584
4585
Ben Murdochbb769b22010-08-11 14:56:33 +01004586void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004587 *stats->start_marker = HeapStats::kStartMarker;
4588 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004589 *stats->new_space_size = new_space_.SizeAsInt();
4590 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004591 *stats->old_pointer_space_size = old_pointer_space_->Size();
4592 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4593 *stats->old_data_space_size = old_data_space_->Size();
4594 *stats->old_data_space_capacity = old_data_space_->Capacity();
4595 *stats->code_space_size = code_space_->Size();
4596 *stats->code_space_capacity = code_space_->Capacity();
4597 *stats->map_space_size = map_space_->Size();
4598 *stats->map_space_capacity = map_space_->Capacity();
4599 *stats->cell_space_size = cell_space_->Size();
4600 *stats->cell_space_capacity = cell_space_->Capacity();
4601 *stats->lo_space_size = lo_space_->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01004602 isolate_->global_handles()->RecordStats(stats);
4603 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
Ben Murdochbb769b22010-08-11 14:56:33 +01004604 *stats->memory_allocator_capacity =
Steve Block44f0eee2011-05-26 01:26:41 +01004605 isolate()->memory_allocator()->Size() +
4606 isolate()->memory_allocator()->Available();
Iain Merrick75681382010-08-19 15:07:18 +01004607 *stats->os_error = OS::GetLastError();
Steve Block44f0eee2011-05-26 01:26:41 +01004608 isolate()->memory_allocator()->Available();
Ben Murdochbb769b22010-08-11 14:56:33 +01004609 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01004610 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01004611 for (HeapObject* obj = iterator.next();
4612 obj != NULL;
4613 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01004614 InstanceType type = obj->map()->instance_type();
4615 ASSERT(0 <= type && type <= LAST_TYPE);
4616 stats->objects_per_type[type]++;
4617 stats->size_per_type[type] += obj->Size();
4618 }
4619 }
Steve Blockd0582a62009-12-15 09:54:21 +00004620}
4621
4622
Ben Murdochf87a2032010-10-22 12:50:53 +01004623intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004624 return old_pointer_space_->Size()
4625 + old_data_space_->Size()
4626 + code_space_->Size()
4627 + map_space_->Size()
4628 + cell_space_->Size()
4629 + lo_space_->Size();
4630}
4631
4632
4633int Heap::PromotedExternalMemorySize() {
4634 if (amount_of_external_allocated_memory_
4635 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4636 return amount_of_external_allocated_memory_
4637 - amount_of_external_allocated_memory_at_last_global_gc_;
4638}
4639
Steve Block44f0eee2011-05-26 01:26:41 +01004640#ifdef DEBUG
4641
4642// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
4643static const int kMarkTag = 2;
4644
4645
4646class HeapDebugUtils {
4647 public:
4648 explicit HeapDebugUtils(Heap* heap)
4649 : search_for_any_global_(false),
4650 search_target_(NULL),
4651 found_target_(false),
4652 object_stack_(20),
4653 heap_(heap) {
4654 }
4655
4656 class MarkObjectVisitor : public ObjectVisitor {
4657 public:
4658 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4659
4660 void VisitPointers(Object** start, Object** end) {
4661 // Copy all HeapObject pointers in [start, end)
4662 for (Object** p = start; p < end; p++) {
4663 if ((*p)->IsHeapObject())
4664 utils_->MarkObjectRecursively(p);
4665 }
4666 }
4667
4668 HeapDebugUtils* utils_;
4669 };
4670
4671 void MarkObjectRecursively(Object** p) {
4672 if (!(*p)->IsHeapObject()) return;
4673
4674 HeapObject* obj = HeapObject::cast(*p);
4675
4676 Object* map = obj->map();
4677
4678 if (!map->IsHeapObject()) return; // visited before
4679
4680 if (found_target_) return; // stop if target found
4681 object_stack_.Add(obj);
4682 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
4683 (!search_for_any_global_ && (obj == search_target_))) {
4684 found_target_ = true;
4685 return;
4686 }
4687
4688 // not visited yet
4689 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
4690
4691 Address map_addr = map_p->address();
4692
4693 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
4694
4695 MarkObjectRecursively(&map);
4696
4697 MarkObjectVisitor mark_visitor(this);
4698
4699 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
4700 &mark_visitor);
4701
4702 if (!found_target_) // don't pop if found the target
4703 object_stack_.RemoveLast();
4704 }
4705
4706
4707 class UnmarkObjectVisitor : public ObjectVisitor {
4708 public:
4709 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4710
4711 void VisitPointers(Object** start, Object** end) {
4712 // Copy all HeapObject pointers in [start, end)
4713 for (Object** p = start; p < end; p++) {
4714 if ((*p)->IsHeapObject())
4715 utils_->UnmarkObjectRecursively(p);
4716 }
4717 }
4718
4719 HeapDebugUtils* utils_;
4720 };
4721
4722
4723 void UnmarkObjectRecursively(Object** p) {
4724 if (!(*p)->IsHeapObject()) return;
4725
4726 HeapObject* obj = HeapObject::cast(*p);
4727
4728 Object* map = obj->map();
4729
4730 if (map->IsHeapObject()) return; // unmarked already
4731
4732 Address map_addr = reinterpret_cast<Address>(map);
4733
4734 map_addr -= kMarkTag;
4735
4736 ASSERT_TAG_ALIGNED(map_addr);
4737
4738 HeapObject* map_p = HeapObject::FromAddress(map_addr);
4739
4740 obj->set_map(reinterpret_cast<Map*>(map_p));
4741
4742 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
4743
4744 UnmarkObjectVisitor unmark_visitor(this);
4745
4746 obj->IterateBody(Map::cast(map_p)->instance_type(),
4747 obj->SizeFromMap(Map::cast(map_p)),
4748 &unmark_visitor);
4749 }
4750
4751
4752 void MarkRootObjectRecursively(Object** root) {
4753 if (search_for_any_global_) {
4754 ASSERT(search_target_ == NULL);
4755 } else {
4756 ASSERT(search_target_->IsHeapObject());
4757 }
4758 found_target_ = false;
4759 object_stack_.Clear();
4760
4761 MarkObjectRecursively(root);
4762 UnmarkObjectRecursively(root);
4763
4764 if (found_target_) {
4765 PrintF("=====================================\n");
4766 PrintF("==== Path to object ====\n");
4767 PrintF("=====================================\n\n");
4768
4769 ASSERT(!object_stack_.is_empty());
4770 for (int i = 0; i < object_stack_.length(); i++) {
4771 if (i > 0) PrintF("\n |\n |\n V\n\n");
4772 Object* obj = object_stack_[i];
4773 obj->Print();
4774 }
4775 PrintF("=====================================\n");
4776 }
4777 }
4778
4779 // Helper class for visiting HeapObjects recursively.
4780 class MarkRootVisitor: public ObjectVisitor {
4781 public:
4782 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4783
4784 void VisitPointers(Object** start, Object** end) {
4785 // Visit all HeapObject pointers in [start, end)
4786 for (Object** p = start; p < end; p++) {
4787 if ((*p)->IsHeapObject())
4788 utils_->MarkRootObjectRecursively(p);
4789 }
4790 }
4791
4792 HeapDebugUtils* utils_;
4793 };
4794
4795 bool search_for_any_global_;
4796 Object* search_target_;
4797 bool found_target_;
4798 List<Object*> object_stack_;
4799 Heap* heap_;
4800
4801 friend class Heap;
4802};
4803
4804#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00004805
4806bool Heap::Setup(bool create_heap_objects) {
Steve Block44f0eee2011-05-26 01:26:41 +01004807#ifdef DEBUG
4808 debug_utils_ = new HeapDebugUtils(this);
4809#endif
4810
Steve Blocka7e24c12009-10-30 11:49:00 +00004811 // Initialize heap spaces and initial maps and objects. Whenever something
4812 // goes wrong, just return false. The caller should check the results and
4813 // call Heap::TearDown() to release allocated memory.
4814 //
 4815  // If the heap is not yet configured (e.g., through the API), configure it.
 4816  // Configuration is based on the flags new-space-size (really the semispace
 4817  // size) and old-space-size if set, or on the initial values of
 4818  // semispace_size_ and old_generation_size_ otherwise.
Steve Block44f0eee2011-05-26 01:26:41 +01004819 if (!configured_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004820 if (!ConfigureHeapDefault()) return false;
4821 }
4822
Steve Block44f0eee2011-05-26 01:26:41 +01004823 gc_initializer_mutex->Lock();
4824 static bool initialized_gc = false;
4825 if (!initialized_gc) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01004826 initialized_gc = true;
4827 InitializeScavengingVisitorsTables();
4828 NewSpaceScavenger::Initialize();
4829 MarkCompactCollector::Initialize();
Steve Block44f0eee2011-05-26 01:26:41 +01004830 }
4831 gc_initializer_mutex->Unlock();
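  // The visitor tables initialized above are shared across isolates (hence
  // the static initialized_gc flag), so they are set up exactly once under
  // gc_initializer_mutex even if several heaps are created concurrently.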
Iain Merrick75681382010-08-19 15:07:18 +01004832
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004833 MarkMapPointersAsEncoded(false);
4834
Steve Blocka7e24c12009-10-30 11:49:00 +00004835  // Set up the memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004836 // space. The chunk is double the size of the requested reserved
4837 // new space size to ensure that we can find a pair of semispaces that
4838 // are contiguous and aligned to their size.
Steve Block44f0eee2011-05-26 01:26:41 +01004839 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
4840 return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004841 void* chunk =
Steve Block44f0eee2011-05-26 01:26:41 +01004842 isolate_->memory_allocator()->ReserveInitialChunk(
4843 4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004844 if (chunk == NULL) return false;
4845
4846 // Align the pair of semispaces to their size, which must be a power
4847 // of 2.
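  // For example, with an 8 MB reserved semispace the chunk reserved above is
  // 32 MB; rounding its start up to a 16 MB boundary wastes at most just
  // under 16 MB, which still leaves room for both 8 MB semispaces.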
Steve Blocka7e24c12009-10-30 11:49:00 +00004848 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004849 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4850 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4851 return false;
4852 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004853
4854 // Initialize old pointer space.
4855 old_pointer_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004856 new OldSpace(this,
4857 max_old_generation_size_,
4858 OLD_POINTER_SPACE,
4859 NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004860 if (old_pointer_space_ == NULL) return false;
4861 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4862
4863 // Initialize old data space.
4864 old_data_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004865 new OldSpace(this,
4866 max_old_generation_size_,
4867 OLD_DATA_SPACE,
4868 NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004869 if (old_data_space_ == NULL) return false;
4870 if (!old_data_space_->Setup(NULL, 0)) return false;
4871
 4872  // Initialize the code space, setting its maximum capacity to the old
4873 // generation size. It needs executable memory.
4874 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4875 // virtual address space, so that they can call each other with near calls.
4876 if (code_range_size_ > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01004877 if (!isolate_->code_range()->Setup(code_range_size_)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004878 return false;
4879 }
4880 }
4881
4882 code_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004883 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004884 if (code_space_ == NULL) return false;
4885 if (!code_space_->Setup(NULL, 0)) return false;
4886
4887 // Initialize map space.
Steve Block44f0eee2011-05-26 01:26:41 +01004888 map_space_ = new MapSpace(this, FLAG_use_big_map_space
Leon Clarkee46be812010-01-19 14:06:41 +00004889 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004890 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4891 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004892 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004893 if (map_space_ == NULL) return false;
4894 if (!map_space_->Setup(NULL, 0)) return false;
4895
4896 // Initialize global property cell space.
Steve Block44f0eee2011-05-26 01:26:41 +01004897 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004898 if (cell_space_ == NULL) return false;
4899 if (!cell_space_->Setup(NULL, 0)) return false;
4900
 4901  // The large object space may contain code or data. We set the memory
4902 // to be non-executable here for safety, but this means we need to enable it
4903 // explicitly when allocating large code objects.
Steve Block44f0eee2011-05-26 01:26:41 +01004904 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004905 if (lo_space_ == NULL) return false;
4906 if (!lo_space_->Setup()) return false;
4907
4908 if (create_heap_objects) {
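  // create_heap_objects is true when the roots, maps and initial objects must
  // be built here from scratch; when it is false the caller is expected to
  // fill them in instead (typically by deserializing a snapshot).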
4909 // Create initial maps.
4910 if (!CreateInitialMaps()) return false;
4911 if (!CreateApiObjects()) return false;
4912
4913 // Create initial objects
4914 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004915
4916 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004917 }
4918
Steve Block44f0eee2011-05-26 01:26:41 +01004919 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
4920 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004921
Steve Block3ce2e202009-11-05 08:53:23 +00004922#ifdef ENABLE_LOGGING_AND_PROFILING
4923 // This should be called only after initial objects have been created.
Steve Block44f0eee2011-05-26 01:26:41 +01004924 isolate_->producer_heap_profile()->Setup();
Steve Block3ce2e202009-11-05 08:53:23 +00004925#endif
4926
Steve Blocka7e24c12009-10-30 11:49:00 +00004927 return true;
4928}
4929
4930
Steve Blockd0582a62009-12-15 09:54:21 +00004931void Heap::SetStackLimits() {
Steve Block44f0eee2011-05-26 01:26:41 +01004932 ASSERT(isolate_ != NULL);
4933 ASSERT(isolate_ == isolate());
Steve Blocka7e24c12009-10-30 11:49:00 +00004934  // On 64-bit machines, pointers are generally out of range of Smis. We write
 4935  // a value that looks to the GC like an out-of-range Smi.
4936
Steve Blockd0582a62009-12-15 09:54:21 +00004937 // Set up the special root array entries containing the stack limits.
4938 // These are actually addresses, but the tag makes the GC ignore it.
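  // Since kSmiTag is 0 and kSmiTagMask is 1, the expressions below simply
  // clear the low bit of each limit address, so the stored word parses as a
  // Smi and is never followed as a heap pointer.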
Steve Blocka7e24c12009-10-30 11:49:00 +00004939 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00004940 reinterpret_cast<Object*>(
Steve Block44f0eee2011-05-26 01:26:41 +01004941 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blockd0582a62009-12-15 09:54:21 +00004942 roots_[kRealStackLimitRootIndex] =
4943 reinterpret_cast<Object*>(
Steve Block44f0eee2011-05-26 01:26:41 +01004944 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00004945}
4946
4947
4948void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01004949 if (FLAG_print_cumulative_gc_stat) {
4950 PrintF("\n\n");
4951 PrintF("gc_count=%d ", gc_count_);
4952 PrintF("mark_sweep_count=%d ", ms_count_);
4953 PrintF("mark_compact_count=%d ", mc_count_);
Steve Block44f0eee2011-05-26 01:26:41 +01004954 PrintF("max_gc_pause=%d ", get_max_gc_pause());
4955 PrintF("min_in_mutator=%d ", get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01004956 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
Steve Block44f0eee2011-05-26 01:26:41 +01004957 get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01004958 PrintF("\n\n");
4959 }
4960
Steve Block44f0eee2011-05-26 01:26:41 +01004961 isolate_->global_handles()->TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00004962
Steve Block44f0eee2011-05-26 01:26:41 +01004963 external_string_table_.TearDown();
Leon Clarkee46be812010-01-19 14:06:41 +00004964
Steve Blocka7e24c12009-10-30 11:49:00 +00004965 new_space_.TearDown();
4966
4967 if (old_pointer_space_ != NULL) {
4968 old_pointer_space_->TearDown();
4969 delete old_pointer_space_;
4970 old_pointer_space_ = NULL;
4971 }
4972
4973 if (old_data_space_ != NULL) {
4974 old_data_space_->TearDown();
4975 delete old_data_space_;
4976 old_data_space_ = NULL;
4977 }
4978
4979 if (code_space_ != NULL) {
4980 code_space_->TearDown();
4981 delete code_space_;
4982 code_space_ = NULL;
4983 }
4984
4985 if (map_space_ != NULL) {
4986 map_space_->TearDown();
4987 delete map_space_;
4988 map_space_ = NULL;
4989 }
4990
4991 if (cell_space_ != NULL) {
4992 cell_space_->TearDown();
4993 delete cell_space_;
4994 cell_space_ = NULL;
4995 }
4996
4997 if (lo_space_ != NULL) {
4998 lo_space_->TearDown();
4999 delete lo_space_;
5000 lo_space_ = NULL;
5001 }
5002
Steve Block44f0eee2011-05-26 01:26:41 +01005003 isolate_->memory_allocator()->TearDown();
5004
5005#ifdef DEBUG
5006 delete debug_utils_;
5007 debug_utils_ = NULL;
5008#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005009}
5010
5011
5012void Heap::Shrink() {
5013 // Try to shrink all paged spaces.
5014 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005015 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
5016 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00005017}
5018
5019
5020#ifdef ENABLE_HEAP_PROTECTION
5021
5022void Heap::Protect() {
5023 if (HasBeenSetup()) {
5024 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005025 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5026 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00005027 }
5028}
5029
5030
5031void Heap::Unprotect() {
5032 if (HasBeenSetup()) {
5033 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005034 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5035 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00005036 }
5037}
5038
5039#endif
5040
5041
Steve Block6ded16b2010-05-10 14:33:55 +01005042void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
5043 ASSERT(callback != NULL);
5044 GCPrologueCallbackPair pair(callback, gc_type);
5045 ASSERT(!gc_prologue_callbacks_.Contains(pair));
5046 return gc_prologue_callbacks_.Add(pair);
5047}
5048
5049
5050void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
5051 ASSERT(callback != NULL);
5052 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5053 if (gc_prologue_callbacks_[i].callback == callback) {
5054 gc_prologue_callbacks_.Remove(i);
5055 return;
5056 }
5057 }
5058 UNREACHABLE();
5059}
5060
5061
5062void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
5063 ASSERT(callback != NULL);
5064 GCEpilogueCallbackPair pair(callback, gc_type);
5065 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
5066 return gc_epilogue_callbacks_.Add(pair);
5067}
5068
5069
5070void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
5071 ASSERT(callback != NULL);
5072 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5073 if (gc_epilogue_callbacks_[i].callback == callback) {
5074 gc_epilogue_callbacks_.Remove(i);
5075 return;
5076 }
5077 }
5078 UNREACHABLE();
5079}
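// A minimal usage sketch, assuming the public API of this V8 version exposes
// these hooks as v8::V8::AddGCPrologueCallback / AddGCEpilogueCallback:
//
//   static void OnGCStart(GCType type, GCCallbackFlags flags) {
//     // e.g. record a timestamp or reset per-collection counters
//   }
//   V8::AddGCPrologueCallback(OnGCStart, kGCTypeAll);
//
// The gc_type filter stored with each callback decides whether it fires for
// scavenges, for mark-sweep/compact collections, or for both.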
5080
5081
Steve Blocka7e24c12009-10-30 11:49:00 +00005082#ifdef DEBUG
5083
5084class PrintHandleVisitor: public ObjectVisitor {
5085 public:
5086 void VisitPointers(Object** start, Object** end) {
5087 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01005088 PrintF(" handle %p to %p\n",
5089 reinterpret_cast<void*>(p),
5090 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00005091 }
5092};
5093
5094void Heap::PrintHandles() {
5095 PrintF("Handles:\n");
5096 PrintHandleVisitor v;
Steve Block44f0eee2011-05-26 01:26:41 +01005097 isolate_->handle_scope_implementer()->Iterate(&v);
Steve Blocka7e24c12009-10-30 11:49:00 +00005098}
5099
5100#endif
5101
5102
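// The space iterators below rely on the AllocationSpace enum order
// (NEW_SPACE .. LO_SPACE) and return NULL once the spaces are exhausted,
// which is how the loops elsewhere in this file use them, e.g.:
//
//   AllSpaces spaces;
//   for (Space* space = spaces.next(); space != NULL; space = spaces.next())
//     space->Protect();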
5103Space* AllSpaces::next() {
5104 switch (counter_++) {
5105 case NEW_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005106 return HEAP->new_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005107 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005108 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005109 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005110 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005111 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005112 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005113 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005114 return HEAP->map_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005115 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005116 return HEAP->cell_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005117 case LO_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005118 return HEAP->lo_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005119 default:
5120 return NULL;
5121 }
5122}
5123
5124
5125PagedSpace* PagedSpaces::next() {
5126 switch (counter_++) {
5127 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005128 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005129 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005130 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005131 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005132 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005133 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005134 return HEAP->map_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005135 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005136 return HEAP->cell_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005137 default:
5138 return NULL;
5139 }
5140}
5141
5142
5143
5144OldSpace* OldSpaces::next() {
5145 switch (counter_++) {
5146 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005147 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005148 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005149 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005150 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005151 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005152 default:
5153 return NULL;
5154 }
5155}
5156
5157
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005158SpaceIterator::SpaceIterator()
5159 : current_space_(FIRST_SPACE),
5160 iterator_(NULL),
5161 size_func_(NULL) {
5162}
5163
5164
5165SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
5166 : current_space_(FIRST_SPACE),
5167 iterator_(NULL),
5168 size_func_(size_func) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005169}
5170
5171
5172SpaceIterator::~SpaceIterator() {
5173 // Delete active iterator if any.
5174 delete iterator_;
5175}
5176
5177
5178bool SpaceIterator::has_next() {
5179 // Iterate until no more spaces.
5180 return current_space_ != LAST_SPACE;
5181}
5182
5183
5184ObjectIterator* SpaceIterator::next() {
5185 if (iterator_ != NULL) {
5186 delete iterator_;
5187 iterator_ = NULL;
5188 // Move to the next space
5189 current_space_++;
5190 if (current_space_ > LAST_SPACE) {
5191 return NULL;
5192 }
5193 }
5194
5195 // Return iterator for the new current space.
5196 return CreateIterator();
5197}
5198
5199
5200// Create an iterator for the space to iterate.
5201ObjectIterator* SpaceIterator::CreateIterator() {
5202 ASSERT(iterator_ == NULL);
5203
5204 switch (current_space_) {
5205 case NEW_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005206 iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005207 break;
5208 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005209 iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005210 break;
5211 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005212 iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005213 break;
5214 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005215 iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005216 break;
5217 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005218 iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005219 break;
5220 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005221 iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005222 break;
5223 case LO_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005224 iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005225 break;
5226 }
5227
 5228  // Return the newly allocated iterator.
5229 ASSERT(iterator_ != NULL);
5230 return iterator_;
5231}
5232
5233
Ben Murdochb0fe1622011-05-05 13:52:32 +01005234class HeapObjectsFilter {
5235 public:
5236 virtual ~HeapObjectsFilter() {}
5237 virtual bool SkipObject(HeapObject* object) = 0;
5238};
5239
5240
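// Both filters below work the same way: the constructor marks the objects
// that should be skipped, and SkipObject() consumes that mark (clearing it as
// it answers), so a filter instance is only good for one full heap iteration.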
5241class FreeListNodesFilter : public HeapObjectsFilter {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005242 public:
5243 FreeListNodesFilter() {
5244 MarkFreeListNodes();
5245 }
5246
Ben Murdochb0fe1622011-05-05 13:52:32 +01005247 bool SkipObject(HeapObject* object) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005248 if (object->IsMarked()) {
5249 object->ClearMark();
5250 return true;
5251 } else {
5252 return false;
5253 }
5254 }
5255
5256 private:
5257 void MarkFreeListNodes() {
Steve Block44f0eee2011-05-26 01:26:41 +01005258 Heap* heap = HEAP;
5259 heap->old_pointer_space()->MarkFreeListNodes();
5260 heap->old_data_space()->MarkFreeListNodes();
5261 MarkCodeSpaceFreeListNodes(heap);
5262 heap->map_space()->MarkFreeListNodes();
5263 heap->cell_space()->MarkFreeListNodes();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005264 }
5265
Steve Block44f0eee2011-05-26 01:26:41 +01005266 void MarkCodeSpaceFreeListNodes(Heap* heap) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005267 // For code space, using FreeListNode::IsFreeListNode is OK.
Steve Block44f0eee2011-05-26 01:26:41 +01005268 HeapObjectIterator iter(heap->code_space());
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005269 for (HeapObject* obj = iter.next_object();
5270 obj != NULL;
5271 obj = iter.next_object()) {
5272 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
5273 }
5274 }
5275
5276 AssertNoAllocation no_alloc;
5277};
5278
5279
Ben Murdochb0fe1622011-05-05 13:52:32 +01005280class UnreachableObjectsFilter : public HeapObjectsFilter {
5281 public:
5282 UnreachableObjectsFilter() {
5283 MarkUnreachableObjects();
5284 }
5285
5286 bool SkipObject(HeapObject* object) {
5287 if (object->IsMarked()) {
5288 object->ClearMark();
5289 return true;
5290 } else {
5291 return false;
5292 }
5293 }
5294
5295 private:
5296 class UnmarkingVisitor : public ObjectVisitor {
5297 public:
5298 UnmarkingVisitor() : list_(10) {}
5299
5300 void VisitPointers(Object** start, Object** end) {
5301 for (Object** p = start; p < end; p++) {
5302 if (!(*p)->IsHeapObject()) continue;
5303 HeapObject* obj = HeapObject::cast(*p);
5304 if (obj->IsMarked()) {
5305 obj->ClearMark();
5306 list_.Add(obj);
5307 }
5308 }
5309 }
5310
5311 bool can_process() { return !list_.is_empty(); }
5312
5313 void ProcessNext() {
5314 HeapObject* obj = list_.RemoveLast();
5315 obj->Iterate(this);
5316 }
5317
5318 private:
5319 List<HeapObject*> list_;
5320 };
5321
5322 void MarkUnreachableObjects() {
5323 HeapIterator iterator;
5324 for (HeapObject* obj = iterator.next();
5325 obj != NULL;
5326 obj = iterator.next()) {
5327 obj->SetMark();
5328 }
5329 UnmarkingVisitor visitor;
Steve Block44f0eee2011-05-26 01:26:41 +01005330 HEAP->IterateRoots(&visitor, VISIT_ALL);
Ben Murdochb0fe1622011-05-05 13:52:32 +01005331 while (visitor.can_process())
5332 visitor.ProcessNext();
5333 }
5334
5335 AssertNoAllocation no_alloc;
5336};
5337
5338
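// HeapIterator walks every object in every space via SpaceIterator; with a
// filtering mode it also allocates one of the filters above. A sketch:
//
//   HeapIterator iterator(HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
//     ...;  // objects not reachable from the roots are skipped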
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005339HeapIterator::HeapIterator()
5340 : filtering_(HeapIterator::kNoFiltering),
5341 filter_(NULL) {
5342 Init();
5343}
5344
5345
Ben Murdochb0fe1622011-05-05 13:52:32 +01005346HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005347 : filtering_(filtering),
5348 filter_(NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005349 Init();
5350}
5351
5352
5353HeapIterator::~HeapIterator() {
5354 Shutdown();
5355}
5356
5357
5358void HeapIterator::Init() {
5359 // Start the iteration.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005360 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5361 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
5362 switch (filtering_) {
5363 case kFilterFreeListNodes:
5364 filter_ = new FreeListNodesFilter;
5365 break;
5366 case kFilterUnreachable:
5367 filter_ = new UnreachableObjectsFilter;
5368 break;
5369 default:
5370 break;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005371 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005372 object_iterator_ = space_iterator_->next();
5373}
5374
5375
5376void HeapIterator::Shutdown() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005377#ifdef DEBUG
Ben Murdochb0fe1622011-05-05 13:52:32 +01005378 // Assert that in filtering mode we have iterated through all
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005379  // objects. Otherwise, the heap will be left in an inconsistent state.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005380 if (filtering_ != kNoFiltering) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005381 ASSERT(object_iterator_ == NULL);
5382 }
5383#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005384 // Make sure the last iterator is deallocated.
5385 delete space_iterator_;
5386 space_iterator_ = NULL;
5387 object_iterator_ = NULL;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005388 delete filter_;
5389 filter_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005390}
5391
5392
Leon Clarked91b9f72010-01-27 17:25:45 +00005393HeapObject* HeapIterator::next() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005394 if (filter_ == NULL) return NextObject();
5395
5396 HeapObject* obj = NextObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01005397 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005398 return obj;
5399}
5400
5401
5402HeapObject* HeapIterator::NextObject() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005403 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00005404 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005405
Leon Clarked91b9f72010-01-27 17:25:45 +00005406 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005407 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00005408 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005409 } else {
 5410    // Go through the spaces looking for one that has objects.
5411 while (space_iterator_->has_next()) {
5412 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00005413 if (HeapObject* obj = object_iterator_->next_object()) {
5414 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005415 }
5416 }
5417 }
5418 // Done with the last space.
5419 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00005420 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005421}
5422
5423
5424void HeapIterator::reset() {
5425 // Restart the iterator.
5426 Shutdown();
5427 Init();
5428}
5429
5430
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005431#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
Steve Blocka7e24c12009-10-30 11:49:00 +00005432
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005433Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00005434
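// PathTracer mirrors the HeapDebugUtils machinery above but is also compiled
// for LIVE_OBJECT_LIST builds: it marks objects by offsetting their map word
// with kMarkTag while tracing, records the current path on object_stack_, and
// unmarks everything again before reporting.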
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005435class PathTracer::MarkVisitor: public ObjectVisitor {
Steve Blocka7e24c12009-10-30 11:49:00 +00005436 public:
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005437 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00005438 void VisitPointers(Object** start, Object** end) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005439 // Scan all HeapObject pointers in [start, end)
5440 for (Object** p = start; !tracer_->found() && (p < end); p++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005441 if ((*p)->IsHeapObject())
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005442 tracer_->MarkRecursively(p, this);
Steve Blocka7e24c12009-10-30 11:49:00 +00005443 }
5444 }
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005445
5446 private:
5447 PathTracer* tracer_;
Steve Blocka7e24c12009-10-30 11:49:00 +00005448};
5449
Steve Blocka7e24c12009-10-30 11:49:00 +00005450
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005451class PathTracer::UnmarkVisitor: public ObjectVisitor {
5452 public:
5453 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5454 void VisitPointers(Object** start, Object** end) {
5455 // Scan all HeapObject pointers in [start, end)
5456 for (Object** p = start; p < end; p++) {
5457 if ((*p)->IsHeapObject())
5458 tracer_->UnmarkRecursively(p, this);
5459 }
5460 }
5461
5462 private:
5463 PathTracer* tracer_;
5464};
5465
5466
5467void PathTracer::VisitPointers(Object** start, Object** end) {
5468 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5469 // Visit all HeapObject pointers in [start, end)
5470 for (Object** p = start; !done && (p < end); p++) {
5471 if ((*p)->IsHeapObject()) {
5472 TracePathFrom(p);
5473 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5474 }
5475 }
5476}
5477
5478
5479void PathTracer::Reset() {
5480 found_target_ = false;
5481 object_stack_.Clear();
5482}
5483
5484
5485void PathTracer::TracePathFrom(Object** root) {
5486 ASSERT((search_target_ == kAnyGlobalObject) ||
5487 search_target_->IsHeapObject());
5488 found_target_in_trace_ = false;
5489 object_stack_.Clear();
5490
5491 MarkVisitor mark_visitor(this);
5492 MarkRecursively(root, &mark_visitor);
5493
5494 UnmarkVisitor unmark_visitor(this);
5495 UnmarkRecursively(root, &unmark_visitor);
5496
5497 ProcessResults();
5498}
5499
5500
5501void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005502 if (!(*p)->IsHeapObject()) return;
5503
5504 HeapObject* obj = HeapObject::cast(*p);
5505
5506 Object* map = obj->map();
5507
5508 if (!map->IsHeapObject()) return; // visited before
5509
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005510 if (found_target_in_trace_) return; // stop if target found
5511 object_stack_.Add(obj);
5512 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5513 (obj == search_target_)) {
5514 found_target_in_trace_ = true;
5515 found_target_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00005516 return;
5517 }
5518
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005519 bool is_global_context = obj->IsGlobalContext();
5520
Steve Blocka7e24c12009-10-30 11:49:00 +00005521 // not visited yet
5522 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5523
5524 Address map_addr = map_p->address();
5525
5526 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5527
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005528 // Scan the object body.
5529 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5530 // This is specialized to scan Context's properly.
5531 Object** start = reinterpret_cast<Object**>(obj->address() +
5532 Context::kHeaderSize);
5533 Object** end = reinterpret_cast<Object**>(obj->address() +
5534 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
5535 mark_visitor->VisitPointers(start, end);
5536 } else {
5537 obj->IterateBody(map_p->instance_type(),
5538 obj->SizeFromMap(map_p),
5539 mark_visitor);
5540 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005541
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005542 // Scan the map after the body because the body is a lot more interesting
5543 // when doing leak detection.
5544 MarkRecursively(&map, mark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005545
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005546 if (!found_target_in_trace_) // don't pop if found the target
5547 object_stack_.RemoveLast();
Steve Blocka7e24c12009-10-30 11:49:00 +00005548}
5549
5550
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005551void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005552 if (!(*p)->IsHeapObject()) return;
5553
5554 HeapObject* obj = HeapObject::cast(*p);
5555
5556 Object* map = obj->map();
5557
5558 if (map->IsHeapObject()) return; // unmarked already
5559
5560 Address map_addr = reinterpret_cast<Address>(map);
5561
5562 map_addr -= kMarkTag;
5563
5564 ASSERT_TAG_ALIGNED(map_addr);
5565
5566 HeapObject* map_p = HeapObject::FromAddress(map_addr);
5567
5568 obj->set_map(reinterpret_cast<Map*>(map_p));
5569
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005570 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005571
5572 obj->IterateBody(Map::cast(map_p)->instance_type(),
5573 obj->SizeFromMap(Map::cast(map_p)),
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005574 unmark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005575}
5576
5577
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005578void PathTracer::ProcessResults() {
5579 if (found_target_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005580 PrintF("=====================================\n");
5581 PrintF("==== Path to object ====\n");
5582 PrintF("=====================================\n\n");
5583
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005584 ASSERT(!object_stack_.is_empty());
5585 for (int i = 0; i < object_stack_.length(); i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005586 if (i > 0) PrintF("\n |\n |\n V\n\n");
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005587 Object* obj = object_stack_[i];
5588#ifdef OBJECT_PRINT
Steve Blocka7e24c12009-10-30 11:49:00 +00005589 obj->Print();
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005590#else
5591 obj->ShortPrint();
5592#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005593 }
5594 PrintF("=====================================\n");
5595 }
5596}
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005597#endif // DEBUG || LIVE_OBJECT_LIST
Steve Blocka7e24c12009-10-30 11:49:00 +00005598
5599
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005600#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00005601// Triggers a depth-first traversal of reachable objects from roots
5602// and finds a path to a specific heap object and prints it.
Leon Clarkee46be812010-01-19 14:06:41 +00005603void Heap::TracePathToObject(Object* target) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005604 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5605 IterateRoots(&tracer, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005606}
5607
5608
5609// Triggers a depth-first traversal of reachable objects from roots
5610// and finds a path to any global object and prints it. Useful for
5611// determining the source for leaks of global objects.
5612void Heap::TracePathToGlobal() {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005613 PathTracer tracer(PathTracer::kAnyGlobalObject,
5614 PathTracer::FIND_ALL,
5615 VISIT_ALL);
5616 IterateRoots(&tracer, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005617}
5618#endif
5619
5620
Ben Murdochf87a2032010-10-22 12:50:53 +01005621static intptr_t CountTotalHolesSize() {
5622 intptr_t holes_size = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +01005623 OldSpaces spaces;
5624 for (OldSpace* space = spaces.next();
5625 space != NULL;
5626 space = spaces.next()) {
5627 holes_size += space->Waste() + space->AvailableFree();
5628 }
5629 return holes_size;
5630}
5631
5632
Steve Block44f0eee2011-05-26 01:26:41 +01005633GCTracer::GCTracer(Heap* heap)
Steve Blocka7e24c12009-10-30 11:49:00 +00005634 : start_time_(0.0),
Leon Clarkef7060e22010-06-03 12:02:55 +01005635 start_size_(0),
Steve Blocka7e24c12009-10-30 11:49:00 +00005636 gc_count_(0),
5637 full_gc_count_(0),
5638 is_compacting_(false),
Leon Clarkef7060e22010-06-03 12:02:55 +01005639 marked_count_(0),
5640 allocated_since_last_gc_(0),
5641 spent_in_mutator_(0),
Steve Block44f0eee2011-05-26 01:26:41 +01005642 promoted_objects_size_(0),
5643 heap_(heap) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005644 // These two fields reflect the state of the previous full collection.
5645 // Set them before they are changed by the collector.
Steve Block44f0eee2011-05-26 01:26:41 +01005646 previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
5647 previous_marked_count_ =
5648 heap_->mark_compact_collector_.previous_marked_count();
Leon Clarkef7060e22010-06-03 12:02:55 +01005649 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00005650 start_time_ = OS::TimeCurrentMillis();
Steve Block44f0eee2011-05-26 01:26:41 +01005651 start_size_ = heap_->SizeOfObjects();
Leon Clarkef7060e22010-06-03 12:02:55 +01005652
5653 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
5654 scopes_[i] = 0;
5655 }
5656
5657 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
5658
Steve Block44f0eee2011-05-26 01:26:41 +01005659 allocated_since_last_gc_ =
5660 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
Leon Clarkef7060e22010-06-03 12:02:55 +01005661
Steve Block44f0eee2011-05-26 01:26:41 +01005662 if (heap_->last_gc_end_timestamp_ > 0) {
5663 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005664 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005665}
5666
5667
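// The destructor reports the collection that just finished: by default a
// single human-readable line, or, with --trace-gc-nvp, a line of name=value
// pairs (pause, mutator, gc, external, mark, sweep, compact, sizes) that is
// easier to post-process.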
5668GCTracer::~GCTracer() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005669 // Printf ONE line iff flag is set.
Leon Clarkef7060e22010-06-03 12:02:55 +01005670 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5671
Steve Block44f0eee2011-05-26 01:26:41 +01005672 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005673
Steve Block44f0eee2011-05-26 01:26:41 +01005674 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
5675 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
Leon Clarkef7060e22010-06-03 12:02:55 +01005676
Steve Block44f0eee2011-05-26 01:26:41 +01005677 int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005678
5679 // Update cumulative GC statistics if required.
5680 if (FLAG_print_cumulative_gc_stat) {
Steve Block44f0eee2011-05-26 01:26:41 +01005681 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
5682 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
5683 heap_->alive_after_last_gc_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005684 if (!first_gc) {
Steve Block44f0eee2011-05-26 01:26:41 +01005685 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
5686 static_cast<int>(spent_in_mutator_));
Leon Clarkef7060e22010-06-03 12:02:55 +01005687 }
5688 }
5689
5690 if (!FLAG_trace_gc_nvp) {
5691 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
5692
5693 PrintF("%s %.1f -> %.1f MB, ",
5694 CollectorString(),
5695 static_cast<double>(start_size_) / MB,
5696 SizeOfHeapObjects());
5697
5698 if (external_time > 0) PrintF("%d / ", external_time);
5699 PrintF("%d ms.\n", time);
5700 } else {
5701 PrintF("pause=%d ", time);
5702 PrintF("mutator=%d ",
5703 static_cast<int>(spent_in_mutator_));
5704
5705 PrintF("gc=");
5706 switch (collector_) {
5707 case SCAVENGER:
5708 PrintF("s");
5709 break;
5710 case MARK_COMPACTOR:
Steve Block44f0eee2011-05-26 01:26:41 +01005711 PrintF("%s",
5712 heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
Leon Clarkef7060e22010-06-03 12:02:55 +01005713 break;
5714 default:
5715 UNREACHABLE();
5716 }
5717 PrintF(" ");
5718
5719 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
5720 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
5721 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
Iain Merrick75681382010-08-19 15:07:18 +01005722 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
Leon Clarkef7060e22010-06-03 12:02:55 +01005723 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
5724
Ben Murdochf87a2032010-10-22 12:50:53 +01005725 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
Steve Block44f0eee2011-05-26 01:26:41 +01005726 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
Ben Murdochf87a2032010-10-22 12:50:53 +01005727 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
5728 in_free_list_or_wasted_before_gc_);
5729 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
Leon Clarkef7060e22010-06-03 12:02:55 +01005730
Ben Murdochf87a2032010-10-22 12:50:53 +01005731 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
5732 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005733
5734 PrintF("\n");
5735 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005736
5737#if defined(ENABLE_LOGGING_AND_PROFILING)
Steve Block44f0eee2011-05-26 01:26:41 +01005738 heap_->PrintShortHeapStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00005739#endif
5740}
5741
5742
5743const char* GCTracer::CollectorString() {
5744 switch (collector_) {
5745 case SCAVENGER:
5746 return "Scavenge";
5747 case MARK_COMPACTOR:
Steve Block44f0eee2011-05-26 01:26:41 +01005748 return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
5749 : "Mark-sweep";
Steve Blocka7e24c12009-10-30 11:49:00 +00005750 }
5751 return "Unknown GC";
5752}
5753
5754
5755int KeyedLookupCache::Hash(Map* map, String* name) {
5756 // Uses only lower 32 bits if pointers are larger.
5757 uintptr_t addr_hash =
Leon Clarkee46be812010-01-19 14:06:41 +00005758 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
Andrei Popescu402d9372010-02-26 13:31:12 +00005759 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
Steve Blocka7e24c12009-10-30 11:49:00 +00005760}
5761
5762
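// The cache is direct-mapped: Hash() mixes the (shifted) map address with the
// name's hash and masks it to the capacity, Lookup() accepts a hit only when
// both the map and the name match, and Update() records only symbol names and
// simply overwrites whatever previously occupied the slot.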
5763int KeyedLookupCache::Lookup(Map* map, String* name) {
5764 int index = Hash(map, name);
5765 Key& key = keys_[index];
5766 if ((key.map == map) && key.name->Equals(name)) {
5767 return field_offsets_[index];
5768 }
Steve Block44f0eee2011-05-26 01:26:41 +01005769 return kNotFound;
Steve Blocka7e24c12009-10-30 11:49:00 +00005770}
5771
5772
5773void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
5774 String* symbol;
Steve Block44f0eee2011-05-26 01:26:41 +01005775 if (HEAP->LookupSymbolIfExists(name, &symbol)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005776 int index = Hash(map, symbol);
5777 Key& key = keys_[index];
5778 key.map = map;
5779 key.name = symbol;
5780 field_offsets_[index] = field_offset;
5781 }
5782}
5783
5784
5785void KeyedLookupCache::Clear() {
5786 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5787}
5788
5789
Steve Blocka7e24c12009-10-30 11:49:00 +00005790void DescriptorLookupCache::Clear() {
5791 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
5792}
5793
5794
Steve Blocka7e24c12009-10-30 11:49:00 +00005795#ifdef DEBUG
Ben Murdochf87a2032010-10-22 12:50:53 +01005796void Heap::GarbageCollectionGreedyCheck() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005797 ASSERT(FLAG_gc_greedy);
Steve Block44f0eee2011-05-26 01:26:41 +01005798 if (isolate_->bootstrapper()->IsActive()) return;
Ben Murdochf87a2032010-10-22 12:50:53 +01005799 if (disallow_allocation_failure()) return;
5800 CollectGarbage(NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005801}
5802#endif
5803
5804
Steve Block44f0eee2011-05-26 01:26:41 +01005805TranscendentalCache::SubCache::SubCache(Type t)
5806 : type_(t),
5807 isolate_(Isolate::Current()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005808 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
5809 uint32_t in1 = 0xffffffffu; // generated by the FPU.
5810 for (int i = 0; i < kCacheSize; i++) {
5811 elements_[i].in[0] = in0;
5812 elements_[i].in[1] = in1;
5813 elements_[i].output = NULL;
5814 }
5815}
5816
5817
Steve Blocka7e24c12009-10-30 11:49:00 +00005818void TranscendentalCache::Clear() {
5819 for (int i = 0; i < kNumberOfCaches; i++) {
5820 if (caches_[i] != NULL) {
5821 delete caches_[i];
5822 caches_[i] = NULL;
5823 }
5824 }
5825}
5826
5827
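// CleanUp() compacts both lists in place: entries that have been cleared to
// the null sentinel are dropped, new-space strings that survived stay in
// new_space_strings_, and strings that have been promoted move over to
// old_space_strings_.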
Leon Clarkee46be812010-01-19 14:06:41 +00005828void ExternalStringTable::CleanUp() {
5829 int last = 0;
5830 for (int i = 0; i < new_space_strings_.length(); ++i) {
Steve Block44f0eee2011-05-26 01:26:41 +01005831 if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5832 if (heap_->InNewSpace(new_space_strings_[i])) {
Leon Clarkee46be812010-01-19 14:06:41 +00005833 new_space_strings_[last++] = new_space_strings_[i];
5834 } else {
5835 old_space_strings_.Add(new_space_strings_[i]);
5836 }
5837 }
5838 new_space_strings_.Rewind(last);
5839 last = 0;
5840 for (int i = 0; i < old_space_strings_.length(); ++i) {
Steve Block44f0eee2011-05-26 01:26:41 +01005841 if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5842 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
Leon Clarkee46be812010-01-19 14:06:41 +00005843 old_space_strings_[last++] = old_space_strings_[i];
5844 }
5845 old_space_strings_.Rewind(last);
5846 Verify();
5847}
5848
5849
5850void ExternalStringTable::TearDown() {
5851 new_space_strings_.Free();
5852 old_space_strings_.Free();
5853}
5854
5855
Steve Blocka7e24c12009-10-30 11:49:00 +00005856} } // namespace v8::internal