// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;
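// A sketch of how these floors feed into the actual limits: after every
// mark-compact, PerformGarbageCollection() below recomputes
//   old_gen_promotion_limit_  = old_gen_size + Max(kMinimumPromotionLimit,
//                                                  old_gen_size / 3);
//   old_gen_allocation_limit_ = old_gen_size + Max(kMinimumAllocationLimit,
//                                                  old_gen_size / 2);
// so the constants only dominate while the promoted (old-generation) size
// stays below 6 MB and 16 MB respectively.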


static Mutex* gc_initializer_mutex = OS::CreateMutex();


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if 0  // defined(ANDROID)
      reserved_semispace_size_(2*MB),
      max_semispace_size_(2*MB),
      initial_semispace_size_(128*KB),
      max_old_generation_size_(512*MB),
      max_executable_size_(max_old_generation_size_),
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
      reserved_semispace_size_(16*MB),
      max_semispace_size_(16*MB),
      initial_semispace_size_(1*MB),
      max_old_generation_size_(1400*MB),
      max_executable_size_(256*MB),
      code_range_size_(512*MB),
#else
      reserved_semispace_size_(8*MB),
      max_semispace_size_(8*MB),
      initial_semispace_size_(512*KB),
      max_old_generation_size_(700*MB),
      max_executable_size_(128*MB),
      code_range_size_(0),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      sweep_generation_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      mc_count_(0),
      ms_count_(0),
      gc_count_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      configured_(false),
      is_safe_to_read_maps_(true) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


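// Note on the free-space encodings handled below (a summary of the logic in
// the function body, not an exhaustive description of the encoding scheme):
// while map pointers are encoded during a compacting collection, a one-word
// hole in old space is tagged with kSingleFreeEncoding and is kIntSize long,
// and a larger hole is tagged with kMultiFreeEncoding with its total size
// stored in the word that follows the marker. Everything else still has a
// map, whose encoded address must be decoded before the size can be read.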
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set.  The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers,       used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space,     used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // As with the statistics reported before GC, we use some complicated logic
  // here to ensure that NewSpace statistics are logged exactly once when
  // --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif  // DEBUG

#if defined(DEBUG)
  ReportStatisticsBeforeGC();
#endif  // DEBUG

  LiveObjectList::GCPrologue();
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG)
  ReportStatisticsAfterGC();
#endif  // DEBUG
  isolate_->debug()->AfterGarbageCollection();
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  mark_compact_collector_.SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector()->SetForceCompaction(true);

  // A major GC invokes weak handle callbacks for weakly reachable handles,
  // but it does not collect the weakly reachable objects themselves until
  // the next major GC.  Therefore, while collecting aggressively, we rerun
  // the major GC whenever weak handle callbacks have been invoked, so that
  // objects which have become garbage are released.
  // Note: as weak callbacks can execute arbitrary code, we cannot assume
  // that the weak callback invocations will eventually stop, so we cap the
  // number of recollection attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  mark_compact_collector()->SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this);
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


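// ReserveSpace() loops until one reservation per space can be satisfied,
// collecting the corresponding space on each failure. A typical caller is
// the snapshot deserializer, which (roughly) does something like:
//
//   heap->ReserveSpace(new_space_size, pointer_space_size, data_space_size,
//                      code_space_size, map_space_size, cell_space_size,
//                      large_object_size);
//   // ... then writes the deserialized objects into the reserved pages.
//
// (Illustrative only; the exact call site lives in the serializer code.)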
void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

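// The survival rate tracked below is the percentage of the new space that
// survived the last scavenge. A worked example: if 512KB out of a 2MB new
// space survives, the rate is 25. Rates above kYoungSurvivalRateThreshold
// extend the current high-survival period, and a swing of more than
// kYoungSurvivalRateAllowedDeviation between two collections marks the
// trend as DECREASING or INCREASING instead of STABLE.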
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}


bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);
    sweep_generation_++;
    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that the mutator is either building or
      // modifying a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone the subsequent mark-sweep collection and thus trade memory
      // space for mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  gc_post_processing_depth_++;
  { DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
  }
  gc_post_processing_depth_--;

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  bool is_compacting = mark_compact_collector_.IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  is_safe_to_read_maps_ = false;
  mark_compact_collector_.CollectGarbage();
  is_safe_to_read_maps_ = true;

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(HEAP->old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  SwitchScavengingVisitorsTableIfProfilingWasEnabled();

  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having
  // an invalid watermark. This guarantees that dirty regions iteration will
  // use a correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
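  //
  // The resulting to-space layout during the scavenge is therefore roughly:
  //
  //   ToSpaceLow()                                          ToSpaceHigh()
  //   | swept objects | unswept copied objects |  free  | promotion queue |
  //                   ^new_space_front         ^top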
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue_.Initialize(new_space_.ToSpaceHigh());

  is_safe_to_read_maps_ = false;
  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &Heap::IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();
  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  is_safe_to_read_maps_ = true;

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.Verify();

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


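// Helper for ProcessWeakReferences() below: filters, in place, the intrusive
// weak list of a context's optimized functions, which is threaded through
// each JSFunction's next_function_link field. Functions the retainer keeps
// are re-linked head to tail; dropped functions are simply not re-linked and
// so fall out of the list.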
static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = heap->undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != heap->undefined_value()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == heap->undefined_value()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if it has one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(heap->undefined_value());
  }

  return head;
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (candidate != undefined_value()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined_value()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if it has one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};

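// Cheney-style breadth-first scan: new_space_front chases new_space_.top(),
// scavenging the pointers inside each copied object until no unprocessed
// copies remain, while promoted objects are drained from a separate queue.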
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue_.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue_.remove(&target, &size);

      // The promoted object might already have been partially visited
      // during dirty regions iteration. Thus we search specifically
      // for pointers into the from-semispace instead of looking for
      // pointers into new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}

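// The scavenging visitor is instantiated twice, once with logging and
// profiling hooks compiled in and once without, so the common case pays
// nothing for instrumentation. Dispatch goes through a shared table of
// per-map callbacks that is swapped wholesale when profiling starts.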
enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


typedef void (*ScavengingCallback)(Map* map,
                                   HeapObject** slot,
                                   HeapObject* object);


static Atomic32 scavenging_visitors_table_mode_;
static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;


INLINE(static void DoScavengeObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* obj));


void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}

template<LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);

    table_.Register(kVisitGlobalContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Context::kSize>);

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSFunction,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<JSFunction::kSize>);

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

  // Helper function used by the evacuation routines to copy a source object
  // to an allocated target object and update the forwarding pointer in the
  // source object. Returns the target object.
  INLINE(static HeapObject* MigrateObject(Heap* heap,
                                          HeapObject* source,
                                          HeapObject* target,
                                          int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
      Isolate* isolate = heap->isolate();
      if (isolate->logger()->is_logging() ||
          CpuProfiler::is_profiling(isolate)) {
        if (target->IsSharedFunctionInfo()) {
          PROFILE(isolate, SharedFunctionInfoMoveEvent(
              source->address(), target->address()));
        }
      }
    }

    return target;
  }

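  // Core copying routine: tries to promote the object into old space (or
  // into the large-object space for oversized variable-sized objects) when
  // the promotion heuristic says so, and otherwise copies it within new
  // space. Promoted pointer objects are queued for later re-scanning.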
  template<ObjectContents object_contents, SizeRestriction size_restriction>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    ASSERT((size_restriction != SMALL) ||
           (object_size <= Page::kMaxHeapObjectSize));
    ASSERT(object->Size() == object_size);

    Heap* heap = map->heap();
    if (heap->ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if ((size_restriction != SMALL) &&
          (object_size > Page::kMaxHeapObjectSize)) {
        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
      } else {
        if (object_contents == DATA_OBJECT) {
          maybe_result = heap->old_data_space()->AllocateRaw(object_size);
        } else {
          maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
        }
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);
        *slot = MigrateObject(heap, object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          heap->promotion_queue()->insert(target, object_size);
        }

        heap->tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    Object* result =
        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
    *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
    return;
  }

  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
                                                 slot,
                                                 object,
                                                 object_size);
  }


  static inline void EvacuateFixedDoubleArray(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
                                              slot,
                                              object,
                                              object_size);
  }


  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqAsciiString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
    int object_size = SeqAsciiString::cast(object)->
        SeqAsciiStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }

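  // Cons strings whose second component is the empty string can be
  // "shortcut": the scavenger forwards the slot straight to the first
  // component instead of copying the cons wrapper.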
  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    ASSERT(IsShortcutCandidate(map->instance_type()));

    if (ConsString::cast(object)->unchecked_second() ==
        map->heap()->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!map->heap()->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
  }

  template<ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template<int object_size>
    static inline void VisitSpecialized(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }

    static inline void Visit(Map* map,
                             HeapObject** slot,
                             HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};


template<LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<logging_and_profiling_mode>::table_;

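// Populate both template instantiations and start out with the cheaper
// non-logging dispatch table; it is swapped for the logging variant lazily
// once any isolate enables logging or profiling.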
static void InitializeScavengingVisitorsTables() {
  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
  scavenging_visitors_table_.CopyFrom(
      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
}


void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
    // Table was already updated by some isolate.
    return;
  }

  if (isolate()->logger()->is_logging() ||
      CpuProfiler::is_profiling(isolate()) ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_profiling())) {
    // If one of the isolates is doing a scavenge at this moment it might
    // see this table in an inconsistent state when some of the callbacks
    // point to ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
    // However this does not lead to any bugs, as such an isolate does not
    // have profiling enabled and any isolate with enabled profiling is
    // guaranteed to see the table in the consistent state.
    scavenging_visitors_table_.CopyFrom(
        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());

    // We use Release_Store to prevent reordering of this write before writes
    // to the table.
    Release_Store(&scavenging_visitors_table_mode_,
                  LOGGING_AND_PROFILING_ENABLED);
  }
}

void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  ASSERT(HEAP->InFromSpace(object));
  MapWord first_word = object->map_word();
  ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  DoScavengeObject(map, p, object);
}

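// Partial maps are used while bootstrapping: the meta map and a few other
// maps must exist before the objects they normally reference (descriptor
// arrays, the null value) can themselves be allocated. The missing fields
// are patched up later in CreateInitialMaps.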
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  return result;
}

MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value());
  map->set_constructor(null_value());
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->init_instance_descriptors();
  map->set_code_cache(empty_fixed_array());
  map->set_prototype_transitions(empty_fixed_array());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  map->set_elements_kind(JSObject::FAST_ELEMENTS);

  // If the map object is aligned, fill the padding area with Smi 0 objects.
  if (Map::kPadStart < Map::kSize) {
    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
           0,
           Map::kSize - Map::kPadStart);
  }
  return map;
}


MaybeObject* Heap::AllocateCodeCache() {
  Object* result;
  { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  CodeCache* code_cache = CodeCache::cast(result);
  code_cache->set_default_cache(empty_fixed_array());
  code_cache->set_normal_type_cache(undefined_value());
  return code_cache;
}


MaybeObject* Heap::AllocatePolymorphicCodeCache() {
  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
}

const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
  {contents, k##name##RootIndex},
  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};

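// Bootstraps the map hierarchy. The meta map (the map of all maps, including
// itself) and the maps needed for fixed arrays and oddballs are created
// first as partial maps; every other map is then allocated normally and the
// partial maps are patched to a consistent state.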
1673bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001674 Object* obj;
1675 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1676 if (!maybe_obj->ToObject(&obj)) return false;
1677 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001678 // Map::cast cannot be used due to uninitialized map field.
1679 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1680 set_meta_map(new_meta_map);
1681 new_meta_map->set_map(new_meta_map);
1682
John Reck59135872010-11-02 12:39:01 -07001683 { MaybeObject* maybe_obj =
1684 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1685 if (!maybe_obj->ToObject(&obj)) return false;
1686 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001687 set_fixed_array_map(Map::cast(obj));
1688
John Reck59135872010-11-02 12:39:01 -07001689 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1690 if (!maybe_obj->ToObject(&obj)) return false;
1691 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001692 set_oddball_map(Map::cast(obj));
1693
Steve Block6ded16b2010-05-10 14:33:55 +01001694 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001695 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1696 if (!maybe_obj->ToObject(&obj)) return false;
1697 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001698 set_empty_fixed_array(FixedArray::cast(obj));
1699
John Reck59135872010-11-02 12:39:01 -07001700 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1701 if (!maybe_obj->ToObject(&obj)) return false;
1702 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001703 set_null_value(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01001704 Oddball::cast(obj)->set_kind(Oddball::kNull);
Steve Blocka7e24c12009-10-30 11:49:00 +00001705
1706 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001707 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1708 if (!maybe_obj->ToObject(&obj)) return false;
1709 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001710 set_empty_descriptor_array(DescriptorArray::cast(obj));
1711
1712 // Fix the instance_descriptors for the existing maps.
Ben Murdoch257744e2011-11-30 15:57:28 +00001713 meta_map()->init_instance_descriptors();
Steve Blocka7e24c12009-10-30 11:49:00 +00001714 meta_map()->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001715 meta_map()->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001716
Ben Murdoch257744e2011-11-30 15:57:28 +00001717 fixed_array_map()->init_instance_descriptors();
Steve Blocka7e24c12009-10-30 11:49:00 +00001718 fixed_array_map()->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001719 fixed_array_map()->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001720
Ben Murdoch257744e2011-11-30 15:57:28 +00001721 oddball_map()->init_instance_descriptors();
Steve Blocka7e24c12009-10-30 11:49:00 +00001722 oddball_map()->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001723 oddball_map()->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001724
1725 // Fix prototype object for existing maps.
1726 meta_map()->set_prototype(null_value());
1727 meta_map()->set_constructor(null_value());
1728
1729 fixed_array_map()->set_prototype(null_value());
1730 fixed_array_map()->set_constructor(null_value());
1731
1732 oddball_map()->set_prototype(null_value());
1733 oddball_map()->set_constructor(null_value());
1734
John Reck59135872010-11-02 12:39:01 -07001735 { MaybeObject* maybe_obj =
1736 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1737 if (!maybe_obj->ToObject(&obj)) return false;
1738 }
Iain Merrick75681382010-08-19 15:07:18 +01001739 set_fixed_cow_array_map(Map::cast(obj));
1740 ASSERT(fixed_array_map() != fixed_cow_array_map());
1741
John Reck59135872010-11-02 12:39:01 -07001742 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1743 if (!maybe_obj->ToObject(&obj)) return false;
1744 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001745 set_heap_number_map(Map::cast(obj));
1746
Ben Murdoch257744e2011-11-30 15:57:28 +00001747 { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
John Reck59135872010-11-02 12:39:01 -07001748 if (!maybe_obj->ToObject(&obj)) return false;
1749 }
Ben Murdoch257744e2011-11-30 15:57:28 +00001750 set_foreign_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001751
1752 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1753 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001754 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1755 if (!maybe_obj->ToObject(&obj)) return false;
1756 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001757 roots_[entry.index] = Map::cast(obj);
1758 }
1759
John Reck59135872010-11-02 12:39:01 -07001760 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1761 if (!maybe_obj->ToObject(&obj)) return false;
1762 }
Steve Blockd0582a62009-12-15 09:54:21 +00001763 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001764 Map::cast(obj)->set_is_undetectable();
1765
John Reck59135872010-11-02 12:39:01 -07001766 { MaybeObject* maybe_obj =
1767 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1768 if (!maybe_obj->ToObject(&obj)) return false;
1769 }
Steve Blockd0582a62009-12-15 09:54:21 +00001770 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001771 Map::cast(obj)->set_is_undetectable();
1772
John Reck59135872010-11-02 12:39:01 -07001773 { MaybeObject* maybe_obj =
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001774 AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
1775 if (!maybe_obj->ToObject(&obj)) return false;
1776 }
1777 set_fixed_double_array_map(Map::cast(obj));
1778
1779 { MaybeObject* maybe_obj =
John Reck59135872010-11-02 12:39:01 -07001780 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1781 if (!maybe_obj->ToObject(&obj)) return false;
1782 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001783 set_byte_array_map(Map::cast(obj));
1784
Ben Murdochb0fe1622011-05-05 13:52:32 +01001785 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1786 if (!maybe_obj->ToObject(&obj)) return false;
1787 }
1788 set_empty_byte_array(ByteArray::cast(obj));
1789
John Reck59135872010-11-02 12:39:01 -07001790 { MaybeObject* maybe_obj =
Steve Block44f0eee2011-05-26 01:26:41 +01001791 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
John Reck59135872010-11-02 12:39:01 -07001792 if (!maybe_obj->ToObject(&obj)) return false;
1793 }
Steve Block44f0eee2011-05-26 01:26:41 +01001794 set_external_pixel_array_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001795
John Reck59135872010-11-02 12:39:01 -07001796 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1797 ExternalArray::kAlignedSize);
1798 if (!maybe_obj->ToObject(&obj)) return false;
1799 }
Steve Block3ce2e202009-11-05 08:53:23 +00001800 set_external_byte_array_map(Map::cast(obj));
1801
John Reck59135872010-11-02 12:39:01 -07001802 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1803 ExternalArray::kAlignedSize);
1804 if (!maybe_obj->ToObject(&obj)) return false;
1805 }
Steve Block3ce2e202009-11-05 08:53:23 +00001806 set_external_unsigned_byte_array_map(Map::cast(obj));
1807
John Reck59135872010-11-02 12:39:01 -07001808 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1809 ExternalArray::kAlignedSize);
1810 if (!maybe_obj->ToObject(&obj)) return false;
1811 }
Steve Block3ce2e202009-11-05 08:53:23 +00001812 set_external_short_array_map(Map::cast(obj));
1813
John Reck59135872010-11-02 12:39:01 -07001814 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1815 ExternalArray::kAlignedSize);
1816 if (!maybe_obj->ToObject(&obj)) return false;
1817 }
Steve Block3ce2e202009-11-05 08:53:23 +00001818 set_external_unsigned_short_array_map(Map::cast(obj));
1819
John Reck59135872010-11-02 12:39:01 -07001820 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1821 ExternalArray::kAlignedSize);
1822 if (!maybe_obj->ToObject(&obj)) return false;
1823 }
Steve Block3ce2e202009-11-05 08:53:23 +00001824 set_external_int_array_map(Map::cast(obj));
1825
John Reck59135872010-11-02 12:39:01 -07001826 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1827 ExternalArray::kAlignedSize);
1828 if (!maybe_obj->ToObject(&obj)) return false;
1829 }
Steve Block3ce2e202009-11-05 08:53:23 +00001830 set_external_unsigned_int_array_map(Map::cast(obj));
1831
John Reck59135872010-11-02 12:39:01 -07001832 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1833 ExternalArray::kAlignedSize);
1834 if (!maybe_obj->ToObject(&obj)) return false;
1835 }
Steve Block3ce2e202009-11-05 08:53:23 +00001836 set_external_float_array_map(Map::cast(obj));
1837
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001838 { MaybeObject* maybe_obj =
1839 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1840 if (!maybe_obj->ToObject(&obj)) return false;
1841 }
1842 set_non_strict_arguments_elements_map(Map::cast(obj));
1843
Ben Murdoch257744e2011-11-30 15:57:28 +00001844 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
1845 ExternalArray::kAlignedSize);
1846 if (!maybe_obj->ToObject(&obj)) return false;
1847 }
1848 set_external_double_array_map(Map::cast(obj));
1849
John Reck59135872010-11-02 12:39:01 -07001850 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1851 if (!maybe_obj->ToObject(&obj)) return false;
1852 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001853 set_code_map(Map::cast(obj));
1854
John Reck59135872010-11-02 12:39:01 -07001855 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1856 JSGlobalPropertyCell::kSize);
1857 if (!maybe_obj->ToObject(&obj)) return false;
1858 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001859 set_global_property_cell_map(Map::cast(obj));
1860
John Reck59135872010-11-02 12:39:01 -07001861 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1862 if (!maybe_obj->ToObject(&obj)) return false;
1863 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001864 set_one_pointer_filler_map(Map::cast(obj));
1865
John Reck59135872010-11-02 12:39:01 -07001866 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1867 if (!maybe_obj->ToObject(&obj)) return false;
1868 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001869 set_two_pointer_filler_map(Map::cast(obj));
1870
1871 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1872 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001873 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1874 if (!maybe_obj->ToObject(&obj)) return false;
1875 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001876 roots_[entry.index] = Map::cast(obj);
1877 }
1878
John Reck59135872010-11-02 12:39:01 -07001879 { MaybeObject* maybe_obj =
1880 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1881 if (!maybe_obj->ToObject(&obj)) return false;
1882 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001883 set_hash_table_map(Map::cast(obj));
1884
John Reck59135872010-11-02 12:39:01 -07001885 { MaybeObject* maybe_obj =
1886 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1887 if (!maybe_obj->ToObject(&obj)) return false;
1888 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001889 set_function_context_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001890
John Reck59135872010-11-02 12:39:01 -07001891 { MaybeObject* maybe_obj =
1892 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1893 if (!maybe_obj->ToObject(&obj)) return false;
1894 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001895 set_catch_context_map(Map::cast(obj));
1896
John Reck59135872010-11-02 12:39:01 -07001897 { MaybeObject* maybe_obj =
1898 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1899 if (!maybe_obj->ToObject(&obj)) return false;
1900 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001901 set_with_context_map(Map::cast(obj));
1902
1903 { MaybeObject* maybe_obj =
1904 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1905 if (!maybe_obj->ToObject(&obj)) return false;
1906 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001907 Map* global_context_map = Map::cast(obj);
1908 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1909 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001910
John Reck59135872010-11-02 12:39:01 -07001911 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1912 SharedFunctionInfo::kAlignedSize);
1913 if (!maybe_obj->ToObject(&obj)) return false;
1914 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001915 set_shared_function_info_map(Map::cast(obj));
1916
Steve Block1e0659c2011-05-24 12:43:12 +01001917 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
1918 JSMessageObject::kSize);
1919 if (!maybe_obj->ToObject(&obj)) return false;
1920 }
1921 set_message_object_map(Map::cast(obj));
1922
Steve Block44f0eee2011-05-26 01:26:41 +01001923 ASSERT(!InNewSpace(empty_fixed_array()));
Steve Blocka7e24c12009-10-30 11:49:00 +00001924 return true;
1925}
1926
1927
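// Heap numbers are boxed doubles. TENURED allocations go directly into the
// old data space; everything else starts out in new space.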
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateHeapNumber(double value) {
  // Use the general version if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, TENURED);

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result;
  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}

MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawCell();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(global_property_cell_map());
  JSGlobalPropertyCell::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::CreateOddball(const char* to_string,
                                 Object* to_number,
                                 byte kind) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_neander_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Object* elements;
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
    if (!maybe_elements->ToObject(&elements)) return false;
  }
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}

void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode());
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode());
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookups in the
  // stub cache for these stubs.
  HandleScope scope;
  // gcc-4.4 has a problem generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, use separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();
}

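// Creates the remaining root objects that are not maps: the oddballs
// (undefined, null, true, false, the hole, and friends), the symbol table,
// and the various bootstrap-time caches.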
bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(obj);
  ASSERT(signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_nan_value(obj);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(obj);
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate the initial symbol table.
  { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use set_symbol_table() due to asserts.
  roots_[kSymbolTableRootIndex] = obj;

  // Assign the print strings for oddballs after creating the symbol table.
  Object* symbol;
  { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
    if (!maybe_symbol->ToObject(&symbol)) return false;
  }
  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value())->set_to_number(nan_value());

  // Allocate the null value.
  { MaybeObject* maybe_obj =
        Oddball::cast(null_value())->Initialize("null",
                                                Smi::FromInt(0),
                                                Oddball::kNull);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  { MaybeObject* maybe_obj = CreateOddball("true",
                                           Smi::FromInt(1),
                                           Oddball::kTrue);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_true_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("false",
                                           Smi::FromInt(0),
                                           Oddball::kFalse);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_false_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("hole",
                                           Smi::FromInt(-1),
                                           Oddball::kTheHole);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_the_hole_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                           Smi::FromInt(-4),
                                           Oddball::kArgumentMarker);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_arguments_marker(obj);

  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                           Smi::FromInt(-2),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_no_interceptor_result_sentinel(obj);

  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
                                           Smi::FromInt(-3),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_termination_exception(obj);

  // Allocate the empty string.
  { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_string(String::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
    { MaybeObject* maybe_obj =
          LookupAsciiSymbol(constant_symbol_table[i].contents);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[constant_symbol_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  { MaybeObject* maybe_obj =
        AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  hidden_symbol_ = String::cast(obj);

  // Allocate the foreign for __proto__.
  { MaybeObject* maybe_obj =
        AllocateForeign((Address) &Accessors::ObjectPrototype);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_prototype_accessors(Foreign::cast(obj));

  // Allocate the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_stubs(NumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_monomorphic_cache(NumberDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
                                                                       obj);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_intrinsic_function_names(StringDictionary::cast(obj));

  if (InitializeNumberStringCache()->IsFailure()) return false;

  // Allocate the cache for single character ASCII strings.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate the cache for external strings pointing to native source code.
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_natives_source_cache(FixedArray::cast(obj));

  // Handling of script id generation is in FACTORY->NewScript.
  set_last_script_id(undefined_value());

  // Initialize keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize compilation cache.
  isolate_->compilation_cache()->Clear();

  return true;
}

MaybeObject* Heap::InitializeNumberStringCache() {
  // Compute the size of the number string cache based on the max heap size.
  // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
  // max_semispace_size_ ==   8 MB => number_string_cache_size = 16KB.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
  Object* obj;
  MaybeObject* maybe_obj =
      AllocateFixedArray(number_string_cache_size * 2, TENURED);
  if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
  return maybe_obj;
}


void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(this, i);
  }
}

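// Hash functions for the number-string cache. Doubles fold the high and low
// 32 bits of their bit pattern together; Smis simply hash to their value.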
static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}


static inline int smi_get_hash(Smi* smi) {
  return smi->value();
}

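// The number-string cache is a flat FixedArray of (number, string) pairs:
// the key for hash h lives at index 2*h and its cached string at 2*h + 1.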
Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  Object* key = number_string_cache()->get(hash * 2);
  if (key == number) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  }
  return undefined_value();
}


void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
    number_string_cache()->set(hash * 2, Smi::cast(number));
  } else {
    hash = double_get_hash(number->Number()) & mask;
    number_string_cache()->set(hash * 2, number);
  }
  number_string_cache()->set(hash * 2 + 1, string);
}

MaybeObject* Heap::NumberToString(Object* number,
                                  bool check_number_string_cache) {
  isolate_->counters()->number_to_string_runtime()->Increment();
  if (check_number_string_cache) {
    Object* cached = GetNumberStringCache(number);
    if (cached != undefined_value()) {
      return cached;
    }
  }

  char arr[100];
  Vector<char> buffer(arr, ARRAY_SIZE(arr));
  const char* str;
  if (number->IsSmi()) {
    int num = Smi::cast(number)->value();
    str = IntToCString(num, buffer);
  } else {
    double num = HeapNumber::cast(number)->value();
    str = DoubleToCString(num, buffer);
  }

  Object* js_string;
  MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
  if (maybe_js_string->ToObject(&js_string)) {
    SetNumberStringCache(number, String::cast(js_string));
  }
  return maybe_js_string;
}

Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}


Heap::RootListIndex Heap::RootIndexForExternalArrayType(
    ExternalArrayType array_type) {
  switch (array_type) {
    case kExternalByteArray:
      return kExternalByteArrayMapRootIndex;
    case kExternalUnsignedByteArray:
      return kExternalUnsignedByteArrayMapRootIndex;
    case kExternalShortArray:
      return kExternalShortArrayMapRootIndex;
    case kExternalUnsignedShortArray:
      return kExternalUnsignedShortArrayMapRootIndex;
    case kExternalIntArray:
      return kExternalIntArrayMapRootIndex;
    case kExternalUnsignedIntArray:
      return kExternalUnsignedIntArrayMapRootIndex;
    case kExternalFloatArray:
      return kExternalFloatArrayMapRootIndex;
    case kExternalDoubleArray:
      return kExternalDoubleArrayMapRootIndex;
    case kExternalPixelArray:
      return kExternalPixelArrayMapRootIndex;
    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}

MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int. Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
  static const DoubleRepresentation minus_zero(-0.0);

  DoubleRepresentation rep(value);
  if (rep.bits == minus_zero.bits) {
    return AllocateHeapNumber(-0.0, pretenure);
  }

  int int_value = FastD2I(value);
  if (value == int_value && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}


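// Worked example of the minus-zero test above, as a hypothetical standalone
// helper: -0.0 == 0.0 compares true under IEEE 754, so the sign of zero can
// only be recovered from the bit pattern, exactly as NumberFromDouble does.
static inline bool IsMinusZeroSketch(double value) {
  static const DoubleRepresentation kMinusZero(-0.0);
  return DoubleRepresentation(value).bits == kMinusZero.bits;
}

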
MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate foreigns in paged spaces.
  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(foreign_map(), space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Foreign::cast(result)->set_address(address);
  return result;
}


MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
  SharedFunctionInfo* share;
  MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
  if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;

  // Set pointer fields.
  share->set_name(name);
  Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
  share->set_code(illegal);
  share->set_scope_info(SerializedScopeInfo::Empty());
  Code* construct_stub =
      isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
  share->set_construct_stub(construct_stub);
  share->set_instance_class_name(Object_symbol());
  share->set_function_data(undefined_value());
  share->set_script(undefined_value());
  share->set_debug_info(undefined_value());
  share->set_inferred_name(empty_string());
  share->set_initial_map(undefined_value());
  share->set_this_property_assignments(undefined_value());
  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));

  // Set integer fields (smi or int, depending on the architecture).
  share->set_length(0);
  share->set_formal_parameter_count(0);
  share->set_expected_nof_properties(0);
  share->set_num_literals(0);
  share->set_start_position_and_type(0);
  share->set_end_position(0);
  share->set_function_token_position(0);
  // All compiler hints default to false or 0.
  share->set_compiler_hints(0);
  share->set_this_property_assignments_count(0);
  share->set_opt_count(0);

  return share;
}


MaybeObject* Heap::AllocateJSMessageObject(String* type,
                                           JSArray* arguments,
                                           int start_position,
                                           int end_position,
                                           Object* script,
                                           Object* stack_trace,
                                           Object* stack_frames) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  JSMessageObject* message = JSMessageObject::cast(result);
  message->set_properties(Heap::empty_fixed_array());
  message->set_elements(Heap::empty_fixed_array());
  message->set_type(type);
  message->set_arguments(arguments);
  message->set_start_position(start_position);
  message->set_end_position(end_position);
  message->set_script(script);
  message->set_stack_trace(stack_trace);
  message->set_stack_frames(stack_frames);
  return result;
}


// Returns true for a character in a range. Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  // This makes use of the unsigned wraparound.
  return character - from <= to - from;
}


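// Worked example of the wraparound above (hypothetical helper): when
// character < from, the unsigned subtraction wraps to a huge value, so a
// single unsigned comparison rejects it and thereby checks both bounds.
static inline bool IsAsciiDigitSketch(uint32_t c) {
  return Between(c, '0', '9');  // '/' (0x2F) wraps to 0xFFFFFFFF and fails
}

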
MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
    Heap* heap,
    uint32_t c1,
    uint32_t c2) {
  String* symbol;
  // Numeric strings have a different hash algorithm not known by
  // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
      heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
    return symbol;
  // Now we know the length is 2, we might as well make use of that fact
  // when building the new string.
  } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
    ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    char* dest = SeqAsciiString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  } else {
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    uc16* dest = SeqTwoByteString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  }
}


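// Worked example of the (c1 | c2) test above (hypothetical helper): because
// String::kMaxAsciiCharCodeU + 1 is a power of two, OR-ing the codes sets no
// bit that either code lacks, so one comparison proves both fit in ASCII,
// e.g. 0x61 | 0x62 == 0x63 <= 0x7F.
static inline bool BothAsciiSketch(uint32_t c1, uint32_t c2) {
  return (c1 | c2) <= String::kMaxAsciiCharCodeU;
}

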
MaybeObject* Heap::AllocateConsString(String* first, String* second) {
  int first_length = first->length();
  if (first_length == 0) {
    return second;
  }

  int second_length = second->length();
  if (second_length == 0) {
    return first;
  }

  int length = first_length + second_length;

  // Optimization for 2-byte strings often used as keys in a decompression
  // dictionary. Check whether we already have the string in the symbol
  // table to prevent creation of many unnecessary strings.
  if (length == 2) {
    unsigned c1 = first->Get(0);
    unsigned c2 = second->Get(0);
    return MakeOrFindTwoCharacterString(this, c1, c2);
  }

  bool first_is_ascii = first->IsAsciiRepresentation();
  bool second_is_ascii = second->IsAsciiRepresentation();
  bool is_ascii = first_is_ascii && second_is_ascii;

  // Make sure that an out of memory exception is thrown if the length
  // of the new cons string is too large.
  if (length > String::kMaxLength || length < 0) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  bool is_ascii_data_in_two_byte_string = false;
  if (!is_ascii) {
    // At least one of the strings uses two-byte representation so we
    // can't use the fast case code for short ascii strings below, but
    // we can try to save memory if all chars actually fit in ascii.
    is_ascii_data_in_two_byte_string =
        first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
    if (is_ascii_data_in_two_byte_string) {
      isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
    }
  }

  // If the resulting string is small make a flat string.
  if (length < String::kMinNonFlatLength) {
    ASSERT(first->IsFlat());
    ASSERT(second->IsFlat());
    if (is_ascii) {
      Object* result;
      { MaybeObject* maybe_result = AllocateRawAsciiString(length);
        if (!maybe_result->ToObject(&result)) return maybe_result;
      }
      // Copy the characters into the new object.
      char* dest = SeqAsciiString::cast(result)->GetChars();
      // Copy first part.
      const char* src;
      if (first->IsExternalString()) {
        src = ExternalAsciiString::cast(first)->resource()->data();
      } else {
        src = SeqAsciiString::cast(first)->GetChars();
      }
      for (int i = 0; i < first_length; i++) *dest++ = src[i];
      // Copy second part.
      if (second->IsExternalString()) {
        src = ExternalAsciiString::cast(second)->resource()->data();
      } else {
        src = SeqAsciiString::cast(second)->GetChars();
      }
      for (int i = 0; i < second_length; i++) *dest++ = src[i];
      return result;
    } else {
      if (is_ascii_data_in_two_byte_string) {
        Object* result;
        { MaybeObject* maybe_result = AllocateRawAsciiString(length);
          if (!maybe_result->ToObject(&result)) return maybe_result;
        }
        // Copy the characters into the new object.
        char* dest = SeqAsciiString::cast(result)->GetChars();
        String::WriteToFlat(first, dest, 0, first_length);
        String::WriteToFlat(second, dest + first_length, 0, second_length);
        isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
        return result;
      }

      Object* result;
      { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
        if (!maybe_result->ToObject(&result)) return maybe_result;
      }
      // Copy the characters into the new object.
      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
      String::WriteToFlat(first, dest, 0, first_length);
      String::WriteToFlat(second, dest + first_length, 0, second_length);
      return result;
    }
  }

  Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
      cons_ascii_string_map() : cons_string_map();

  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  AssertNoAllocation no_gc;
  ConsString* cons_string = ConsString::cast(result);
  WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
  cons_string->set_length(length);
  cons_string->set_hash_field(String::kEmptyHashField);
  cons_string->set_first(first, mode);
  cons_string->set_second(second, mode);
  return result;
}


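// Decision sketch for the concatenation above (hypothetical helper): short
// results are copied into a fresh flat string up front, while longer ones
// become a ConsString that just points at both halves and is flattened
// lazily on first character access.
static inline bool ShouldMakeFlatSketch(int length) {
  return length < String::kMinNonFlatLength;
}

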
MaybeObject* Heap::AllocateSubString(String* buffer,
                                     int start,
                                     int end,
                                     PretenureFlag pretenure) {
  int length = end - start;
  if (length == 0) {
    return empty_string();
  } else if (length == 1) {
    return LookupSingleCharacterStringFromCode(buffer->Get(start));
  } else if (length == 2) {
    // Optimization for 2-byte strings often used as keys in a decompression
    // dictionary. Check whether we already have the string in the symbol
    // table to prevent creation of many unnecessary strings.
    unsigned c1 = buffer->Get(start);
    unsigned c2 = buffer->Get(start + 1);
    return MakeOrFindTwoCharacterString(this, c1, c2);
  }

  // Make an attempt to flatten the buffer to reduce access time.
  buffer = buffer->TryFlattenGetString();

  Object* result;
  { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
        ? AllocateRawAsciiString(length, pretenure)
        : AllocateRawTwoByteString(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  String* string_result = String::cast(result);
  // Copy the characters into the new object.
  if (buffer->IsAsciiRepresentation()) {
    ASSERT(string_result->IsAsciiRepresentation());
    char* dest = SeqAsciiString::cast(string_result)->GetChars();
    String::WriteToFlat(buffer, dest, start, end);
  } else {
    ASSERT(string_result->IsTwoByteRepresentation());
    uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
    String::WriteToFlat(buffer, dest, start, end);
  }

  return result;
}


MaybeObject* Heap::AllocateExternalStringFromAscii(
    ExternalAsciiString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  Map* map = external_ascii_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


MaybeObject* Heap::AllocateExternalStringFromTwoByte(
    ExternalTwoByteString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  // For small strings we check whether the resource contains only
  // ASCII characters. If yes, we use a different string map.
  static const size_t kAsciiCheckLengthLimit = 32;
  bool is_ascii = length <= kAsciiCheckLengthLimit &&
      String::IsAscii(resource->data(), static_cast<int>(length));
  Map* map = is_ascii ?
      external_string_with_ascii_data_map() : external_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


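// Heuristic sketch (illustrative only): scanning an external resource for
// ASCII-only data costs O(length), so the check above is capped by
// kAsciiCheckLengthLimit; long two-byte strings simply keep the generic map
// rather than pay for a full scan.
static inline bool WorthAsciiDataCheckSketch(size_t length) {
  return length <= 32;  // mirrors kAsciiCheckLengthLimit above
}

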
MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
  if (code <= String::kMaxAsciiCharCode) {
    Object* value = single_character_string_cache()->get(code);
    if (value != undefined_value()) return value;

    char buffer[1];
    buffer[0] = static_cast<char>(code);
    Object* result;
    MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));

    if (!maybe_result->ToObject(&result)) return maybe_result;
    single_character_string_cache()->set(code, result);
    return result;
  }

  Object* result;
  { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  String* answer = String::cast(result);
  answer->Set(0, code);
  return answer;
}


MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  if (pretenure == NOT_TENURED) {
    return AllocateByteArray(length);
  }
  int size = ByteArray::SizeFor(length);
  Object* result;
  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
        ? old_data_space_->AllocateRaw(size)
        : lo_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


MaybeObject* Heap::AllocateByteArray(int length) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


void Heap::CreateFillerObjectAt(Address addr, int size) {
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map(one_pointer_filler_map());
  } else if (size == 2 * kPointerSize) {
    filler->set_map(two_pointer_filler_map());
  } else {
    filler->set_map(byte_array_map());
    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
  }
}


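// Dispatch sketch for the filler above (hypothetical helper): one- and
// two-word gaps get dedicated filler maps with no length field, while
// anything larger is disguised as a byte array whose length field encodes
// the gap, keeping the heap iterable.
static inline const char* FillerKindSketch(int size) {
  if (size == kPointerSize) return "one_pointer_filler";
  if (size == 2 * kPointerSize) return "two_pointer_filler";
  return "byte_array_filler";
}

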
MaybeObject* Heap::AllocateExternalArray(int length,
                                         ExternalArrayType array_type,
                                         void* external_pointer,
                                         PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
                                            space,
                                            OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ExternalArray*>(result)->set_map(
      MapForExternalArrayType(array_type));
  reinterpret_cast<ExternalArray*>(result)->set_length(length);
  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
      external_pointer);

  return result;
}


MaybeObject* Heap::CreateCode(const CodeDesc& desc,
                              Code::Flags flags,
                              Handle<Object> self_reference,
                              bool immovable) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  Object* reloc_info;
  { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
    if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
  }

  // Compute size.
  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
  int obj_size = Code::SizeFor(body_size);
  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
  MaybeObject* maybe_result;
  // Large code objects and code objects which should stay at a fixed address
  // are allocated in large object space.
  if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
    maybe_result = lo_space_->AllocateRawCode(obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Initialize the object.
  HeapObject::cast(result)->set_map(code_map());
  Code* code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_info(ByteArray::cast(reloc_info));
  code->set_flags(flags);
  if (code->is_call_stub() || code->is_keyed_call_stub()) {
    code->set_check_type(RECEIVER_MAP_CHECK);
  }
  code->set_deoptimization_data(empty_fixed_array());
  code->set_next_code_flushing_candidate(undefined_value());
  // Allow self references to created code object by patching the handle to
  // point to the newly allocated Code object.
  if (!self_reference.is_null()) {
    *(self_reference.location()) = code;
  }
  // Migrate generated code.
  // The generated code can contain Object** values (typically from handles)
  // that are dereferenced during the copy to point directly to the actual heap
  // objects. These pointers can include references to the code object itself,
  // through the self_reference parameter.
  code->CopyFrom(desc);

#ifdef DEBUG
  code->Verify();
#endif
  return code;
}


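// Sizing sketch for CreateCode above (hypothetical helper): the instruction
// body is rounded up to object alignment before Code::SizeFor adds the
// fixed header, which is what the kCodeAlignment assertion then verifies.
static inline int CodeObjectSizeSketch(int instr_size) {
  return Code::SizeFor(RoundUp(instr_size, kObjectAlignment));
}

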
MaybeObject* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  MaybeObject* maybe_result;
  if (obj_size > MaxObjectSizeInPagedSpace()) {
    maybe_result = lo_space_->AllocateRawCode(obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  CopyBlock(new_addr, old_addr, obj_size);
  // Relocate the copy.
  Code* new_code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}


MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  Object* reloc_info_array;
  { MaybeObject* maybe_reloc_info_array =
        AllocateByteArray(reloc_info.length(), TENURED);
    if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
      return maybe_reloc_info_array;
    }
  }

  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);

  int new_obj_size = Code::SizeFor(new_body_size);

  Address old_addr = code->address();

  size_t relocation_offset =
      static_cast<size_t>(code->instruction_end() - old_addr);

  MaybeObject* maybe_result;
  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
    maybe_result = lo_space_->AllocateRawCode(new_obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(new_obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy code object.
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();

  // Copy header and instructions.
  memcpy(new_addr, old_addr, relocation_offset);

  Code* new_code = Code::cast(result);
  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));

  // Copy patched rinfo.
  memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());

  // Relocate the copy.
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  new_code->Relocate(new_addr - old_addr);

#ifdef DEBUG
  code->Verify();
#endif
  return new_code;
}


MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
  // If allocation failures are disallowed, we may allocate in a different
  // space when new space is full and the object is not a large object.
  AllocationSpace retry_space =
      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(map->instance_size(), space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(map);
  return result;
}


MaybeObject* Heap::InitializeFunction(JSFunction* function,
                                      SharedFunctionInfo* shared,
                                      Object* prototype) {
  ASSERT(!prototype->IsMap());
  function->initialize_properties();
  function->initialize_elements();
  function->set_shared(shared);
  function->set_code(shared->code());
  function->set_prototype_or_initial_map(prototype);
  function->set_context(undefined_value());
  function->set_literals(empty_fixed_array());
  function->set_next_function_link(undefined_value());
  return function;
}


MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
  // Allocate the prototype. Make sure to use the object function
  // from the function's context, since the function can be from a
  // different context.
  JSFunction* object_function =
      function->context()->global_context()->object_function();
  Object* prototype;
  { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
    if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
  }
  // When creating the prototype for the function we must set its
  // constructor to the function.
  Object* result;
  { MaybeObject* maybe_result =
        JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
            constructor_symbol(), function, DONT_ENUM);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return prototype;
}


MaybeObject* Heap::AllocateFunction(Map* function_map,
                                    SharedFunctionInfo* shared,
                                    Object* prototype,
                                    PretenureFlag pretenure) {
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(function_map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return InitializeFunction(JSFunction::cast(result), shared, prototype);
}


MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
  // To get fast allocation and map sharing for arguments objects we
  // allocate them based on an arguments boilerplate.

  JSObject* boilerplate;
  int arguments_object_size;
  bool strict_mode_callee = callee->IsJSFunction() &&
      JSFunction::cast(callee)->shared()->strict_mode();
  if (strict_mode_callee) {
    boilerplate =
        isolate()->context()->global_context()->
            strict_mode_arguments_boilerplate();
    arguments_object_size = kArgumentsObjectSizeStrict;
  } else {
    boilerplate =
        isolate()->context()->global_context()->arguments_boilerplate();
    arguments_object_size = kArgumentsObjectSize;
  }

  // This calls Copy directly rather than using Heap::AllocateRaw so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  // Check that the size of the boilerplate matches our
  // expectations. The ArgumentsAccessStub::GenerateNewObject relies
  // on the size being a known constant.
  ASSERT(arguments_object_size == boilerplate->map()->instance_size());

  // Do the allocation.
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the content. The arguments boilerplate doesn't have any
  // fields that point to new space so it's safe to skip the write
  // barrier here.
  CopyBlock(HeapObject::cast(result)->address(),
            boilerplate->address(),
            JSObject::kHeaderSize);

  // Set the length property.
  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
                                                Smi::FromInt(length),
                                                SKIP_WRITE_BARRIER);
  // Set the callee property for non-strict mode arguments object only.
  if (!strict_mode_callee) {
    JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
                                                  callee);
  }

  // Check the state of the object.
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastElements());

  return result;
}


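// Mode dispatch sketch for the allocation above (hypothetical helper):
// strict mode arguments objects omit the callee slot, so they use a smaller
// boilerplate and never write kArgumentsCalleeIndex.
static inline int ArgumentsObjectSizeSketch(bool strict_mode) {
  return strict_mode ? Heap::kArgumentsObjectSizeStrict
                     : Heap::kArgumentsObjectSize;
}

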
static bool HasDuplicates(DescriptorArray* descriptors) {
  int count = descriptors->number_of_descriptors();
  if (count > 1) {
    String* prev_key = descriptors->GetKey(0);
    for (int i = 1; i != count; i++) {
      String* current_key = descriptors->GetKey(i);
      if (prev_key == current_key) return true;
      prev_key = current_key;
    }
  }
  return false;
}


MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map with the size and number of in-object properties
  // suggested by the function.
  int instance_size = fun->shared()->CalculateInstanceSize();
  int in_object_properties = fun->shared()->CalculateInObjectProperties();
  Object* map_obj;
  { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
    if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
  }

  // Fetch or allocate prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
      if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
    }
  }
  Map* map = Map::cast(map_obj);
  map->set_inobject_properties(in_object_properties);
  map->set_unused_property_fields(in_object_properties);
  map->set_prototype(prototype);
  ASSERT(map->has_fast_elements());

  // If the function has only simple this property assignments add
  // field descriptors for these to the initial map as the object
  // cannot be constructed without having these properties. Guard by
  // the inline_new flag so we only change the map if we generate a
  // specialized construct stub.
  ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
  if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
    int count = fun->shared()->this_property_assignments_count();
    if (count > in_object_properties) {
      // Inline constructor can only handle inobject properties.
      fun->shared()->ForbidInlineConstructor();
    } else {
      Object* descriptors_obj;
      { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
        if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
          return maybe_descriptors_obj;
        }
      }
      DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
      for (int i = 0; i < count; i++) {
        String* name = fun->shared()->GetThisPropertyAssignmentName(i);
        ASSERT(name->IsSymbol());
        FieldDescriptor field(name, i, NONE);
        field.SetEnumerationIndex(i);
        descriptors->Set(i, &field);
      }
      descriptors->SetNextEnumerationIndex(count);
      descriptors->SortUnchecked();

      // The descriptors may contain duplicates because the compiler does not
      // guarantee the uniqueness of property names (it would have required
      // quadratic time). Once the descriptors are sorted we can check for
      // duplicates in linear time.
      if (HasDuplicates(descriptors)) {
        fun->shared()->ForbidInlineConstructor();
      } else {
        map->set_instance_descriptors(descriptors);
        map->set_pre_allocated_property_fields(count);
        map->set_unused_property_fields(in_object_properties - count);
      }
    }
  }

  fun->shared()->StartInobjectSlackTracking(map);

  return map;
}


void Heap::InitializeJSObjectFromMap(JSObject* obj,
                                     FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map. For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
  // fixed array (e.g., Heap::empty_fixed_array()). Currently, the object
  // verification code has to cope with (temporarily) invalid objects. See,
  // for example, JSArray::JSArrayVerify.
  Object* filler;
  // We cannot always fill with one_pointer_filler_map because objects
  // created from API functions expect their internal fields to be initialized
  // with undefined_value.
  if (map->constructor()->IsJSFunction() &&
      JSFunction::cast(map->constructor())->shared()->
          IsInobjectSlackTrackingInProgress()) {
    // We might want to shrink the object later.
    ASSERT(obj->GetInternalFieldCount() == 0);
    filler = Heap::one_pointer_filler_map();
  } else {
    filler = Heap::undefined_value();
  }
  obj->InitializeBody(map->instance_size(), filler);
}


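// Filler choice sketch (hypothetical helper mirroring the branch above):
// while the constructor is still tracking in-object slack, unused words are
// filled with the one-pointer filler map so the object's tail can be
// trimmed later; otherwise undefined is used, which is what API-created
// internal fields must contain.
static inline bool CanShrinkLaterSketch(Map* map) {
  return map->constructor()->IsJSFunction() &&
         JSFunction::cast(map->constructor())->shared()->
             IsInobjectSlackTrackingInProgress();
}

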
MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  int prop_size =
      map->pre_allocated_property_fields() +
      map->unused_property_fields() -
      map->inobject_properties();
  ASSERT(prop_size >= 0);
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Allocate the JSObject.
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
  Object* obj;
  { MaybeObject* maybe_obj = Allocate(map, space);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  ASSERT(JSObject::cast(obj)->HasFastElements());
  return obj;
}


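// Backing store sizing sketch (hypothetical helper, same arithmetic as
// above): pre-allocated fields plus unused fields minus the in-object
// portion leaves the number of out-of-object property slots the map expects
// its fresh instances to carry.
static inline int OutOfObjectPropertySlotsSketch(Map* map) {
  return map->pre_allocated_property_fields() +
         map->unused_property_fields() -
         map->inobject_properties();
}

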
MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
                                    PretenureFlag pretenure) {
  // Allocate the initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map;
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
    }
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }
  // Allocate the object based on the constructor's initial map.
  MaybeObject* result =
      AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
#ifdef DEBUG
  // Make sure result is NOT a global object if valid.
  Object* non_failure;
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
#endif
  return result;
}


MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
  // Allocate map.
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
  // maps. Will probably depend on the identity of the handler object, too.
  Map* map;
  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
  map->set_prototype(prototype);

  // Allocate the proxy object.
  Object* result;
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
  if (!maybe_result->ToObject(&result)) return maybe_result;
  JSProxy::cast(result)->set_handler(handler);
  JSProxy::cast(result)->set_padding(Smi::FromInt(0));
  return result;
}


MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Make sure no field properties are described in the initial map.
  // This guarantees us that normalizing the properties does not
  // require us to change property values to JSGlobalPropertyCells.
  ASSERT(map->NextFreePropertyIndex() == 0);

  // Make sure we don't have a ton of pre-allocated slots in the
  // global objects. They will be unused once we normalize the object.
  ASSERT(map->unused_property_fields() == 0);
  ASSERT(map->inobject_properties() == 0);

  // Initial size of the backing store to avoid resize of the storage during
  // bootstrapping. The size differs between the JS global object and the
  // builtins object.
  int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;

  // Allocate a dictionary object for backing storage.
  Object* obj;
  { MaybeObject* maybe_obj =
        StringDictionary::Allocate(
            map->NumberOfDescribedProperties() * 2 + initial_size);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  StringDictionary* dictionary = StringDictionary::cast(obj);

  // The global object might be created from an object template with accessors.
  // Fill these accessors into the dictionary.
  DescriptorArray* descs = map->instance_descriptors();
  for (int i = 0; i < descs->number_of_descriptors(); i++) {
    PropertyDetails details(descs->GetDetails(i));
    ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
    PropertyDetails d =
        PropertyDetails(details.attributes(), CALLBACKS, details.index());
    Object* value = descs->GetCallbacksObject(i);
    { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
      if (!maybe_value->ToObject(&value)) return maybe_value;
    }

    Object* result;
    { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    dictionary = StringDictionary::cast(result);
  }

  // Allocate the global object and initialize it with the backing store.
  { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  JSObject* global = JSObject::cast(obj);
  InitializeJSObjectFromMap(global, dictionary, map);

  // Create a new map for the global object.
  { MaybeObject* maybe_obj = map->CopyDropDescriptors();
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  Map* new_map = Map::cast(obj);

  // Set up the global object as a normalized object.
  global->set_map(new_map);
  global->map()->clear_instance_descriptors();
  global->set_properties(dictionary);

  // Make sure result is a global object with properties in dictionary.
  ASSERT(global->IsGlobalObject());
  ASSERT(!global->HasFastProperties());
  return global;
}


MaybeObject* Heap::CopyJSObject(JSObject* source) {
  // Never used to copy functions. If functions need to be copied we
  // have to be careful to clear the literals array.
  ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  if (always_allocate()) {
    { MaybeObject* maybe_clone =
          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(clone_address,
              source->address(),
              object_size);
    // Update write barrier for all fields that lie beyond the header.
    RecordWrites(clone_address,
                 JSObject::kHeaderSize,
                 (object_size - JSObject::kHeaderSize) / kPointerSize);
  } else {
    { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    ASSERT(InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(HeapObject::cast(clone)->address(),
              source->address(),
              object_size);
  }

  FixedArray* elements = FixedArray::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem;
    { MaybeObject* maybe_elem =
          (elements->map() == fixed_cow_array_map()) ?
          elements : CopyFixedArray(elements);
      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
    }
    JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop;
    { MaybeObject* maybe_prop = CopyFixedArray(properties);
      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
    }
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
  }
  // Return the new clone.
  return clone;
}


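// Barrier sketch for the clone paths above (hypothetical helper): the store
// buffer only has to know about old-to-new pointers, so a clone placed in
// new space skips the write barrier entirely, while a clone forced into old
// space records every field beyond the header.
static inline bool CloneNeedsRecordWritesSketch(Heap* heap, HeapObject* clone) {
  return !heap->InNewSpace(clone);
}

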
MaybeObject* Heap::ReinitializeJSProxyAsJSObject(JSProxy* object) {
  // Allocate fresh map.
  // TODO(rossberg): Once we optimize proxies, cache these maps.
  Map* map;
  MaybeObject* maybe_map_obj =
      AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;

  // Check that the receiver has the same size as a fresh object.
  ASSERT(map->instance_size() == object->map()->instance_size());

  map->set_prototype(object->map()->prototype());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Reset the map for the object.
  object->set_map(map);

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(JSObject::cast(object),
                            FixedArray::cast(properties), map);
  return object;
}


John Reck59135872010-11-02 12:39:01 -07003447MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3448 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003449 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003450 Map* map = constructor->initial_map();
3451
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003452 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003453 // objects allocated using the constructor.
3454 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003455 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003456
3457 // Allocate the backing storage for the properties.
3458 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003459 Object* properties;
3460 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3461 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3462 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003463
3464 // Reset the map for the object.
3465 object->set_map(constructor->initial_map());
3466
3467 // Reinitialize the object from the constructor map.
3468 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3469 return object;
3470}
3471
3472
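// A hedged worked example for the prop_size computation used by the two
// Reinitialize* functions above (the numbers are illustrative, not taken
// from a real map): with unused_property_fields() == 4 and
// inobject_properties() == 1, prop_size is 3; only the overflow properties
// that cannot live inside the object itself get a backing-store slot.
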
MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
                                           PretenureFlag pretenure) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRawAsciiString(string.length(), pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the characters into the new object.
  SeqAsciiString* string_result = SeqAsciiString::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->SeqAsciiStringSet(i, string[i]);
  }
  return result;
}


MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
                                              PretenureFlag pretenure) {
  // V8 only supports characters in the Basic Multilingual Plane.
  const uc32 kMaxSupportedChar = 0xFFFF;
  // Count the number of characters in the UTF-8 string.
  Access<UnicodeCache::Utf8Decoder>
      decoder(isolate_->unicode_cache()->utf8_decoder());
  decoder->Reset(string.start(), string.length());
  int chars = 0;
  while (decoder->has_more()) {
    decoder->GetNext();
    chars++;
  }

  Object* result;
  { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Convert and copy the characters into the new object.
  String* string_result = String::cast(result);
  decoder->Reset(string.start(), string.length());
  for (int i = 0; i < chars; i++) {
    uc32 r = decoder->GetNext();
    if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
    string_result->Set(i, r);
  }
  return result;
}


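// The slow UTF-8 path above makes two passes over the input: one pass to
// count the decoded characters, and a second pass to re-decode and store
// them. As a hedged example of the BMP clamping (the byte sequence is
// illustrative): the 4-byte sequence F0 9F 98 80 decodes to U+1F600, which
// is above kMaxSupportedChar and is therefore stored as
// unibrow::Utf8::kBadChar instead.
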
MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
                                             PretenureFlag pretenure) {
  // Check if the string is an ASCII string.
  MaybeObject* maybe_result;
  if (String::IsAscii(string.start(), string.length())) {
    maybe_result = AllocateRawAsciiString(string.length(), pretenure);
  } else {  // It's not an ASCII string.
    maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
  }
  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy the characters into the new object, which may be either ASCII or
  // UTF-16.
  String* string_result = String::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->Set(i, string[i]);
  }
  return result;
}


Map* Heap::SymbolMapForString(String* string) {
  // If the string is in new space it cannot be used as a symbol.
  if (InNewSpace(string)) return NULL;

  // Find the corresponding symbol map for strings.
  Map* map = string->map();
  if (map == ascii_string_map()) {
    return ascii_symbol_map();
  }
  if (map == string_map()) {
    return symbol_map();
  }
  if (map == cons_string_map()) {
    return cons_symbol_map();
  }
  if (map == cons_ascii_string_map()) {
    return cons_ascii_symbol_map();
  }
  if (map == external_string_map()) {
    return external_symbol_map();
  }
  if (map == external_ascii_string_map()) {
    return external_ascii_symbol_map();
  }
  if (map == external_string_with_ascii_data_map()) {
    return external_symbol_with_ascii_data_map();
  }

  // No match found.
  return NULL;
}


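// A hedged usage sketch for SymbolMapForString() (caller-side, not code
// from this file): a string can be symbolized in place only when it lives
// outside new space and a symbol counterpart exists for its map.
//
//   Map* symbol_map = heap->SymbolMapForString(str);
//   if (symbol_map == NULL) {
//     // Fall back to copying: the string is in new space, or its map has
//     // no symbol counterpart.
//   }
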
MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
                                          int chars,
                                          uint32_t hash_field) {
  ASSERT(chars >= 0);
  // Ensure that chars matches the number of characters in the buffer.
  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
  // Determine whether the string is ASCII.
  bool is_ascii = true;
  while (buffer->has_more()) {
    if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
      is_ascii = false;
      break;
    }
  }
  buffer->Rewind();

  // Compute map and object size.
  int size;
  Map* map;

  if (is_ascii) {
    if (chars > SeqAsciiString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = ascii_symbol_map();
    size = SeqAsciiString::SizeFor(chars);
  } else {
    if (chars > SeqTwoByteString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = symbol_map();
    size = SeqTwoByteString::SizeFor(chars);
  }

  // Allocate string.
  Object* result;
  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
                   ? lo_space_->AllocateRaw(size)
                   : old_data_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<HeapObject*>(result)->set_map(map);
  // Set the length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(chars);
  answer->set_hash_field(hash_field);

  ASSERT_EQ(size, answer->Size());

  // Fill in the characters.
  for (int i = 0; i < chars; i++) {
    answer->Set(i, buffer->GetNext());
  }
  return answer;
}


MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
  if (length < 0 || length > SeqAsciiString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  int size = SeqAsciiString::SizeFor(length);
  ASSERT(size <= SeqAsciiString::kMaxSize);

  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space; the retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retrying in large object space if needed.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(ascii_string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


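// Space selection in the raw-string allocator above (and its two-byte twin
// below), summarized as a decision table; sizes are compared against
// kMaxObjectSizeInNewSpace and MaxObjectSizeInPagedSpace():
//
//   pretenure    size class            first try        retry
//   -----------  --------------------  ---------------  --------------
//   NOT_TENURED  fits everywhere       NEW_SPACE        OLD_DATA_SPACE
//   NOT_TENURED  too big for pages     NEW_SPACE        LO_SPACE
//   NOT_TENURED  too big for new       LO_SPACE         (ignored)
//   TENURED      fits in a page        OLD_DATA_SPACE   OLD_DATA_SPACE
//   TENURED      too big for a page    LO_SPACE         (ignored)
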
MaybeObject* Heap::AllocateRawTwoByteString(int length,
                                            PretenureFlag pretenure) {
  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = SeqTwoByteString::SizeFor(length);
  ASSERT(size <= SeqTwoByteString::kMaxSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space; the retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retrying in large object space if needed.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


MaybeObject* Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
  reinterpret_cast<FixedArray*>(result)->set_length(0);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  ASSERT(length > 0);
  // Use the general function if we're forced to always allocate.
  if (always_allocate()) return AllocateFixedArray(length, TENURED);
  // Allocate the raw data for a fixed array.
  int size = FixedArray::SizeFor(length);
  return size <= kMaxObjectSizeInNewSpace
      ? new_space_.AllocateRaw(size)
      : lo_space_->AllocateRawFixedArray(size);
}


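// Note on AllocateRawFixedArray(int) above: length 0 never reaches it
// (callers return empty_fixed_array() first, hence the ASSERT(length > 0)),
// and the result is deliberately left uninitialized. Every caller must
// install a map and length immediately, before any further allocation could
// trigger a GC that would otherwise observe an invalid object.
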
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  if (InNewSpace(obj)) {
    HeapObject* dst = HeapObject::cast(obj);
    dst->set_map(map);
    CopyBlock(dst->address() + kPointerSize,
              src->address() + kPointerSize,
              FixedArray::SizeFor(len) - kPointerSize);
    return obj;
  }
  HeapObject::cast(obj)->set_map(map);
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the content.
  AssertNoAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}


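// CopyFixedArrayWithMap() above has two deliberately different copy paths:
// a new-space destination is copied with a raw CopyBlock (no write barrier
// is needed for pointers stored in new space), while an old-space
// destination re-stores every element through set() with the mode returned
// by GetWriteBarrierMode(), so the remembered set stays correct.
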
MaybeObject* Heap::AllocateFixedArray(int length) {
  ASSERT(length >= 0);
  if (length == 0) return empty_fixed_array();
  Object* result;
  { MaybeObject* maybe_result = AllocateRawFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the header.
  FixedArray* array = reinterpret_cast<FixedArray*>(result);
  array->set_map(fixed_array_map());
  array->set_length(length);
  // Initialize the body.
  ASSERT(!InNewSpace(undefined_value()));
  MemsetPointer(array->data_start(), undefined_value(), length);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  int size = FixedArray::SizeFor(length);
  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_POINTER_SPACE &&
             size > MaxObjectSizeInPagedSpace()) {
    // Too big for old pointer space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;

  return AllocateRaw(size, space, retry_space);
}


MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
    Heap* heap,
    int length,
    PretenureFlag pretenure,
    Object* filler) {
  ASSERT(length >= 0);
  ASSERT(heap->empty_fixed_array()->IsFixedArray());
  if (length == 0) return heap->empty_fixed_array();

  ASSERT(!heap->InNewSpace(filler));
  Object* result;
  { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(heap->fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  MemsetPointer(array->data_start(), filler, length);
  return array;
}


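// The two public entry points below differ only in the filler value they
// pass to AllocateFixedArrayWithFiller() above: undefined for ordinary
// arrays, the hole for arrays whose elements must read as absent.
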
MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      undefined_value());
}


MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
                                               PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      the_hole_value());
}


MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
  if (length == 0) return empty_fixed_array();

  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
  FixedArray::cast(obj)->set_length(length);
  return obj;
}


MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
  int size = FixedDoubleArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedDoubleArray*>(result)->set_map(
      fixed_double_array_map());
  reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
  return result;
}


MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
    int length,
    PretenureFlag pretenure) {
  if (length == 0) return empty_fixed_double_array();

  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
  FixedDoubleArray::cast(obj)->set_length(length);
  return obj;
}


MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
                                               PretenureFlag pretenure) {
  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  int size = FixedDoubleArray::SizeFor(length);
  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_DATA_SPACE &&
             size > MaxObjectSizeInPagedSpace()) {
    // Too big for old data space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;

  return AllocateRaw(size, space, retry_space);
}


MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
  ASSERT(result->IsHashTable());
  return result;
}


MaybeObject* Heap::AllocateGlobalContext() {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(global_context_map());
  ASSERT(context->IsGlobalContext());
  ASSERT(result->IsContext());
  return result;
}


MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(function_context_map());
  context->set_closure(function);
  context->set_previous(function->context());
  context->set_extension(NULL);
  context->set_global(function->context()->global());
  return context;
}


MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
                                        Context* previous,
                                        String* name,
                                        Object* thrown_object) {
  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(catch_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(name);
  context->set_global(previous->global());
  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
  return context;
}


MaybeObject* Heap::AllocateWithContext(JSFunction* function,
                                       Context* previous,
                                       JSObject* extension) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(with_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(extension);
  context->set_global(previous->global());
  return context;
}


MaybeObject* Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) \
    case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return Failure::InternalError();
  }
  int size = map->instance_size();
  AllocationSpace space =
      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Struct::cast(result)->InitializeBody(size);
  return result;
}


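// For reference, a hedged sketch of what one STRUCT_LIST(MAKE_CASE)
// expansion in AllocateStruct() above looks like (assuming STRUCT_LIST
// contains an (ACCESSOR_INFO, AccessorInfo, accessor_info) entry):
//
//   case ACCESSOR_INFO_TYPE: map = accessor_info_map(); break;
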
bool Heap::IdleNotification() {
  static const int kIdlesBeforeScavenge = 4;
  static const int kIdlesBeforeMarkSweep = 7;
  static const int kIdlesBeforeMarkCompact = 8;
  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
  static const unsigned int kGCsBetweenCleanup = 4;

  if (!last_idle_notification_gc_count_init_) {
    last_idle_notification_gc_count_ = gc_count_;
    last_idle_notification_gc_count_init_ = true;
  }

  bool uncommit = true;
  bool finished = false;

  // Reset the number of idle notifications received when a number of
  // GCs have taken place. This allows another round of cleanup based
  // on idle notifications if enough work has been carried out to
  // provoke a number of garbage collections.
  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
    number_idle_notifications_ =
        Min(number_idle_notifications_ + 1, kMaxIdleCount);
  } else {
    number_idle_notifications_ = 0;
    last_idle_notification_gc_count_ = gc_count_;
  }

  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
    if (contexts_disposed_ > 0) {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(false);
    } else {
      CollectGarbage(NEW_SPACE);
    }
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
    // Before doing the mark-sweep collections we clear the
    // compilation cache to avoid hanging on to source code and
    // generated code for cached functions.
    isolate_->compilation_cache()->Clear();

    CollectAllGarbage(false);
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;

  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
    CollectAllGarbage(true);
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
    number_idle_notifications_ = 0;
    finished = true;
  } else if (contexts_disposed_ > 0) {
    if (FLAG_expose_gc) {
      contexts_disposed_ = 0;
    } else {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(false);
      last_idle_notification_gc_count_ = gc_count_;
    }
    // If this is the first idle notification, we reset the
    // notification count to avoid letting idle notifications for
    // context disposal garbage collections start a potentially too
    // aggressive idle GC cycle.
    if (number_idle_notifications_ <= 1) {
      number_idle_notifications_ = 0;
      uncommit = false;
    }
  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
    // If we have received more than kIdlesBeforeMarkCompact idle
    // notifications we do not perform any cleanup because we don't
    // expect to gain much by doing so.
    finished = true;
  }

  // Make sure that we have no pending context disposals and
  // conditionally uncommit from space.
  ASSERT(contexts_disposed_ == 0);
  if (uncommit) UncommitFromSpace();
  return finished;
}


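// Summary of the idle-notification state machine above: the 4th consecutive
// idle round runs a scavenge (or a full GC if contexts were disposed), the
// 7th clears the compilation cache and runs a non-compacting full GC, and
// the 8th runs a compacting collection and reports completion; counts past
// that perform no cleanup. The counter is reset whenever at least
// kGCsBetweenCleanup (4) ordinary GCs happen between notifications.
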
#ifdef DEBUG

void Heap::Print() {
  if (!HasBeenSetup()) return;
  isolate()->PrintStack();
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next())
    space->Print();
}


void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics();
  // We do not look for code in new space, map space, or old space. If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics();
}


// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
         title, gc_count_);
  PrintF("mark-compact GC : %d\n", mc_count_);
  PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_promotion_limit_);
  PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_allocation_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
  isolate_->global_handles()->PrintStats();
  PrintF("\n");

  PrintF("Heap statistics : ");
  isolate_->memory_allocator()->ReportStatistics();
  PrintF("To space : ");
  new_space_.ReportStatistics();
  PrintF("Old pointer space : ");
  old_pointer_space_->ReportStatistics();
  PrintF("Old data space : ");
  old_data_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Cell space : ");
  cell_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}

#endif  // DEBUG

bool Heap::Contains(HeapObject* value) {
  return Contains(value->address());
}


bool Heap::Contains(Address addr) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  return HasBeenSetup() &&
      (new_space_.ToSpaceContains(addr) ||
       old_pointer_space_->Contains(addr) ||
       old_data_space_->Contains(addr) ||
       code_space_->Contains(addr) ||
       map_space_->Contains(addr) ||
       cell_space_->Contains(addr) ||
       lo_space_->SlowContains(addr));
}


bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  return InSpace(value->address(), space);
}


bool Heap::InSpace(Address addr, AllocationSpace space) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  if (!HasBeenSetup()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_.ToSpaceContains(addr);
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Contains(addr);
    case OLD_DATA_SPACE:
      return old_data_space_->Contains(addr);
    case CODE_SPACE:
      return code_space_->Contains(addr);
    case MAP_SPACE:
      return map_space_->Contains(addr);
    case CELL_SPACE:
      return cell_space_->Contains(addr);
    case LO_SPACE:
      return lo_space_->SlowContains(addr);
  }

  return false;
}


#ifdef DEBUG
static void DummyScavengePointer(HeapObject** p) {
}


static void VerifyPointersUnderWatermark(
    PagedSpace* space,
    DirtyRegionCallback visit_dirty_region) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  while (it.has_next()) {
    Page* page = it.next();
    Address start = page->ObjectAreaStart();
    Address end = page->AllocationWatermark();

    HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
                              start,
                              end,
                              visit_dirty_region,
                              &DummyScavengePointer);
  }
}


static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC, the Heap::InNewSpace() predicate
        // asserts that any pointer satisfying it points into the
        // active semispace.
        HEAP->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}


void Heap::Verify() {
  ASSERT(HasBeenSetup());

  VerifyPointersVisitor visitor;
  IterateRoots(&visitor, VISIT_ONLY_STRONG);

  new_space_.Verify();

  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
  old_pointer_space_->Verify(&dirty_regions_visitor);
  map_space_->Verify(&dirty_regions_visitor);

  VerifyPointersUnderWatermark(old_pointer_space_,
                               &IteratePointersInDirtyRegion);
  VerifyPointersUnderWatermark(map_space_,
                               &IteratePointersInDirtyMapsRegion);
  VerifyPointersUnderWatermark(lo_space_);

  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);

  VerifyPointersVisitor no_dirty_regions_visitor;
  old_data_space_->Verify(&no_dirty_regions_visitor);
  code_space_->Verify(&no_dirty_regions_visitor);
  cell_space_->Verify(&no_dirty_regions_visitor);

  lo_space_->Verify();
}
#endif  // DEBUG


MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupAsciiSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
                                     int from,
                                     int length) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupSubStringAsciiSymbol(string,
                                                   from,
                                                   length,
                                                   &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupTwoByteSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupSymbol(String* string) {
  if (string->IsSymbol()) return string;
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupString(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
  if (string->IsSymbol()) {
    *symbol = string;
    return true;
  }
  return symbol_table()->LookupSymbolIfExists(string, symbol);
}


#ifdef DEBUG
void Heap::ZapFromSpace() {
  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
  for (Address a = new_space_.FromSpaceLow();
       a < new_space_.FromSpaceHigh();
       a += kPointerSize) {
    Memory::Address_at(a) = kFromSpaceZapValue;
  }
}
#endif  // DEBUG


bool Heap::IteratePointersInDirtyRegion(Heap* heap,
                                        Address start,
                                        Address end,
                                        ObjectSlotCallback copy_object_func) {
  Address slot_address = start;
  bool pointers_to_new_space_found = false;

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap->InNewSpace(*slot)) {
      ASSERT((*slot)->IsHeapObject());
      copy_object_func(reinterpret_cast<HeapObject**>(slot));
      if (heap->InNewSpace(*slot)) {
        ASSERT((*slot)->IsHeapObject());
        pointers_to_new_space_found = true;
      }
    }
    slot_address += kPointerSize;
  }
  return pointers_to_new_space_found;
}


// Compute the start address of the first map following the given addr.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->ObjectAreaStart();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute the end address of the last map preceding the given addr.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


static bool IteratePointersInDirtyMaps(Address start,
                                       Address end,
                                       ObjectSlotCallback copy_object_func) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  bool pointers_to_new_space_found = false;

  Heap* heap = HEAP;
  while (map_address < end) {
    ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    if (Heap::IteratePointersInDirtyRegion(heap,
                                           pointer_fields_start,
                                           pointer_fields_end,
                                           copy_object_func)) {
      pointers_to_new_space_found = true;
    }

    map_address += Map::kSize;
  }

  return pointers_to_new_space_found;
}


bool Heap::IteratePointersInDirtyMapsRegion(
    Heap* heap,
    Address start,
    Address end,
    ObjectSlotCallback copy_object_func) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  bool contains_pointers_to_new_space = false;

  if (map_aligned_start != start) {
    Address prev_map = map_aligned_start - Map::kSize;
    ASSERT(Memory::Object_at(prev_map)->IsMap());

    Address pointer_fields_start =
        Max(start, prev_map + Map::kPointerFieldsBeginOffset);

    Address pointer_fields_end =
        Min(prev_map + Map::kPointerFieldsEndOffset, end);

    contains_pointers_to_new_space =
        IteratePointersInDirtyRegion(heap,
                                     pointer_fields_start,
                                     pointer_fields_end,
                                     copy_object_func)
        || contains_pointers_to_new_space;
  }

  contains_pointers_to_new_space =
      IteratePointersInDirtyMaps(map_aligned_start,
                                 map_aligned_end,
                                 copy_object_func)
      || contains_pointers_to_new_space;

  if (map_aligned_end != end) {
    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());

    Address pointer_fields_start =
        map_aligned_end + Map::kPointerFieldsBeginOffset;

    Address pointer_fields_end =
        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);

    contains_pointers_to_new_space =
        IteratePointersInDirtyRegion(heap,
                                     pointer_fields_start,
                                     pointer_fields_end,
                                     copy_object_func)
        || contains_pointers_to_new_space;
  }

  return contains_pointers_to_new_space;
}


void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                             Address end,
                                             ObjectSlotCallback callback) {
  Address slot_address = start;
  Page* page = Page::FromAddress(start);

  uint32_t marks = page->GetRegionMarks();

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (InFromSpace(*slot)) {
      ASSERT((*slot)->IsHeapObject());
      callback(reinterpret_cast<HeapObject**>(slot));
      if (InNewSpace(*slot)) {
        ASSERT((*slot)->IsHeapObject());
        marks |= page->GetRegionMaskForAddress(slot_address);
      }
    }
    slot_address += kPointerSize;
  }

  page->SetRegionMarks(marks);
}


uint32_t Heap::IterateDirtyRegions(
    uint32_t marks,
    Address area_start,
    Address area_end,
    DirtyRegionCallback visit_dirty_region,
    ObjectSlotCallback copy_object_func) {
  uint32_t newmarks = 0;
  uint32_t mask = 1;

  if (area_start >= area_end) {
    return newmarks;
  }

  Address region_start = area_start;

  // area_start does not necessarily coincide with the start of the first
  // region. Thus, to calculate the beginning of the next region, we have to
  // align area_start by Page::kRegionSize.
  Address second_region =
      reinterpret_cast<Address>(
          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
          ~Page::kRegionAlignmentMask);

  // The next region might be beyond area_end.
  Address region_end = Min(second_region, area_end);

  if (marks & mask) {
    if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
      newmarks |= mask;
    }
  }
  mask <<= 1;

  // Iterate subsequent regions which lie fully inside [area_start, area_end[.
  region_start = region_end;
  region_end = region_start + Page::kRegionSize;

  while (region_end <= area_end) {
    if (marks & mask) {
      if (visit_dirty_region(this,
                             region_start,
                             region_end,
                             copy_object_func)) {
        newmarks |= mask;
      }
    }

    region_start = region_end;
    region_end = region_start + Page::kRegionSize;

    mask <<= 1;
  }

  if (region_start != area_end) {
    // A small piece of the area is left unvisited because area_end does not
    // coincide with a region end. Check whether the region covering the
    // last part of the area is dirty.
    if (marks & mask) {
      if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
        newmarks |= mask;
      }
    }
  }

  return newmarks;
}


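// A hedged worked example of the mask arithmetic above (the concrete region
// size is illustrative; the real value is Page::kRegionSize): assuming 8 KB
// regions and an area covering the first 20 KB of a page, the area touches
// three regions, tested against mask values 1, 2 and 4 in turn. A region's
// bit survives into `newmarks` only if visit_dirty_region() reports that it
// still holds pointers into new space.
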
4601void Heap::IterateDirtyRegions(
4602 PagedSpace* space,
4603 DirtyRegionCallback visit_dirty_region,
4604 ObjectSlotCallback copy_object_func,
4605 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004606
4607 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004608
Steve Blocka7e24c12009-10-30 11:49:00 +00004609 while (it.has_next()) {
4610 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004611 uint32_t marks = page->GetRegionMarks();
4612
4613 if (marks != Page::kAllRegionsCleanMarks) {
4614 Address start = page->ObjectAreaStart();
4615
4616 // Do not try to visit pointers beyond page allocation watermark.
4617 // Page can contain garbage pointers there.
4618 Address end;
4619
4620 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4621 page->IsWatermarkValid()) {
4622 end = page->AllocationWatermark();
4623 } else {
4624 end = page->CachedAllocationWatermark();
4625 }
4626
4627 ASSERT(space == old_pointer_space_ ||
4628 (space == map_space_ &&
4629 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4630
4631 page->SetRegionMarks(IterateDirtyRegions(marks,
4632 start,
4633 end,
4634 visit_dirty_region,
4635 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004636 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004637
4638 // Mark page watermark as invalid to maintain watermark validity invariant.
4639 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4640 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004641 }
4642}
4643
4644
Steve Blockd0582a62009-12-15 09:54:21 +00004645void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4646 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004647 IterateWeakRoots(v, mode);
4648}
4649
4650
4651void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004652 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004653 v->Synchronize("symbol_table");
Ben Murdoch257744e2011-11-30 15:57:28 +00004654 if (mode != VISIT_ALL_IN_SCAVENGE &&
4655 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00004656 // Scavenge collections have special processing for this.
Steve Block44f0eee2011-05-26 01:26:41 +01004657 external_string_table_.Iterate(v);
  }
  v->Synchronize("external_string_table");
}


void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
  v->Synchronize("strong_root_list");

  v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
  v->Synchronize("symbol");

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize("bootstrapper");
  isolate_->Iterate(v);
  v->Synchronize("top");
  Relocatable::Iterate(v);
  v->Synchronize("relocatable");

#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->Iterate(v);
  if (isolate_->deoptimizer_data() != NULL) {
    isolate_->deoptimizer_data()->Iterate(v);
  }
#endif
  v->Synchronize("debug");
  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize("compilationcache");

  // Iterate over local handles in handle scopes.
  isolate_->handle_scope_implementer()->Iterate(v);
  v->Synchronize("handlescope");

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    isolate_->builtins()->IterateBuiltins(v);
  }
  v->Synchronize("builtins");

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize("globalhandles");

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize("threadmanager");

  // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
  // During garbage collection this keeps the partial snapshot cache alive.
  // During deserialization of the startup snapshot this creates the partial
  // snapshot cache and deserializes the objects it refers to. During
  // serialization this does nothing, since the partial snapshot cache is
  // empty. However the next thing we do is create the partial snapshot,
  // filling up the partial snapshot cache with objects it needs as we go.
  SerializerDeserializer::Iterate(v);
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot. However at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}
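
// Illustrative sketch (not part of the original source): a minimal
// ObjectVisitor that counts the root pointers reported by
// IterateStrongRoots. The class name and counting logic are assumptions;
// only VisitPointers is required by the visitor interface used above.
//
//   class CountingVisitor : public ObjectVisitor {
//    public:
//     CountingVisitor() : count_(0) {}
//     void VisitPointers(Object** start, Object** end) {
//       count_ += static_cast<int>(end - start);  // one slot per root
//     }
//     int count_;
//   };
//
//   CountingVisitor counter;
//   heap->IterateStrongRoots(&counter, VISIT_ONLY_STRONG);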


// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
                         int max_old_gen_size,
                         int max_executable_size) {
  if (HasBeenSetup()) return false;

  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;

  if (Snapshot::IsEnabled()) {
    // If we are using a snapshot we always reserve the default amount
    // of memory for each semispace because code in the snapshot has
    // write-barrier code that relies on the size and alignment of new
    // space. We therefore cannot use a larger max semispace size
    // than the default reserved semispace size.
    if (max_semispace_size_ > reserved_semispace_size_) {
      max_semispace_size_ = reserved_semispace_size_;
    }
  } else {
    // If we are not using snapshots we reserve space for the actual
    // max semispace size.
    reserved_semispace_size_ = max_semispace_size_;
  }

  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
  if (max_executable_size > 0) {
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
  }

  // The max executable size must be less than or equal to the max old
  // generation size.
  if (max_executable_size_ > max_old_generation_size_) {
    max_executable_size_ = max_old_generation_size_;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
  external_allocation_limit_ = 10 * max_semispace_size_;

  // The old generation is paged.
  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);

  configured_ = true;
  return true;
}
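
// Worked example (illustrative numbers, not from the original source):
//
//   heap->ConfigureHeap(600 * KB, 0, 0);
//   // max_semispace_size_ is rounded up to 1 * MB by RoundUpToPowerOf2,
//   // since new-space containment is tested with a single address bit;
//   // the zero old-generation/executable requests leave those defaults
//   // untouched because of the `> 0` guards above.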


bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
                       FLAG_max_old_space_size * MB,
                       FLAG_max_executable_size * MB);
}


void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
  *stats->start_marker = HeapStats::kStartMarker;
  *stats->end_marker = HeapStats::kEndMarker;
  *stats->new_space_size = new_space_.SizeAsInt();
  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
  *stats->old_pointer_space_size = old_pointer_space_->Size();
  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
  *stats->old_data_space_size = old_data_space_->Size();
  *stats->old_data_space_capacity = old_data_space_->Capacity();
  *stats->code_space_size = code_space_->Size();
  *stats->code_space_capacity = code_space_->Capacity();
  *stats->map_space_size = map_space_->Size();
  *stats->map_space_capacity = map_space_->Capacity();
  *stats->cell_space_size = cell_space_->Size();
  *stats->cell_space_capacity = cell_space_->Capacity();
  *stats->lo_space_size = lo_space_->Size();
  isolate_->global_handles()->RecordStats(stats);
  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
  *stats->memory_allocator_capacity =
      isolate()->memory_allocator()->Size() +
      isolate()->memory_allocator()->Available();
  *stats->os_error = OS::GetLastError();
  if (take_snapshot) {
    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      InstanceType type = obj->map()->instance_type();
      ASSERT(0 <= type && type <= LAST_TYPE);
      stats->objects_per_type[type]++;
      stats->size_per_type[type] += obj->Size();
    }
  }
}


intptr_t Heap::PromotedSpaceSize() {
  return old_pointer_space_->Size()
      + old_data_space_->Size()
      + code_space_->Size()
      + map_space_->Size()
      + cell_space_->Size()
      + lo_space_->Size();
}


int Heap::PromotedExternalMemorySize() {
  if (amount_of_external_allocated_memory_
      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
  return amount_of_external_allocated_memory_
      - amount_of_external_allocated_memory_at_last_global_gc_;
}
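
// Worked example (illustrative numbers): with 24 MB of external memory
// currently registered and 16 MB registered at the end of the last global
// GC, PromotedExternalMemorySize() reports the 8 MB delta; if the current
// amount has dropped back to or below the old mark, it reports 0.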

#ifdef DEBUG

// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;


class HeapDebugUtils {
 public:
  explicit HeapDebugUtils(Heap* heap)
      : search_for_any_global_(false),
        search_target_(NULL),
        found_target_(false),
        object_stack_(20),
        heap_(heap) {
  }

  class MarkObjectVisitor : public ObjectVisitor {
   public:
    explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Mark all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->MarkObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };

  void MarkObjectRecursively(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);

    Object* map = obj->map();

    if (!map->IsHeapObject()) return;  // visited before

    if (found_target_) return;  // stop if target found
    object_stack_.Add(obj);
    if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
        (!search_for_any_global_ && (obj == search_target_))) {
      found_target_ = true;
      return;
    }

    // not visited yet
    Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

    Address map_addr = map_p->address();

    obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

    MarkObjectRecursively(&map);

    MarkObjectVisitor mark_visitor(this);

    obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                     &mark_visitor);

    if (!found_target_)  // don't pop if found the target
      object_stack_.RemoveLast();
  }


  class UnmarkObjectVisitor : public ObjectVisitor {
   public:
    explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Unmark all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->UnmarkObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };


  void UnmarkObjectRecursively(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);

    Object* map = obj->map();

    if (map->IsHeapObject()) return;  // unmarked already

    Address map_addr = reinterpret_cast<Address>(map);

    map_addr -= kMarkTag;

    ASSERT_TAG_ALIGNED(map_addr);

    HeapObject* map_p = HeapObject::FromAddress(map_addr);

    obj->set_map(reinterpret_cast<Map*>(map_p));

    UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

    UnmarkObjectVisitor unmark_visitor(this);

    obj->IterateBody(Map::cast(map_p)->instance_type(),
                     obj->SizeFromMap(Map::cast(map_p)),
                     &unmark_visitor);
  }


  void MarkRootObjectRecursively(Object** root) {
    if (search_for_any_global_) {
      ASSERT(search_target_ == NULL);
    } else {
      ASSERT(search_target_->IsHeapObject());
    }
    found_target_ = false;
    object_stack_.Clear();

    MarkObjectRecursively(root);
    UnmarkObjectRecursively(root);

    if (found_target_) {
      PrintF("=====================================\n");
      PrintF("====        Path to object       ====\n");
      PrintF("=====================================\n\n");

      ASSERT(!object_stack_.is_empty());
      for (int i = 0; i < object_stack_.length(); i++) {
        if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
        Object* obj = object_stack_[i];
        obj->Print();
      }
      PrintF("=====================================\n");
    }
  }

  // Helper class for visiting HeapObjects recursively.
  class MarkRootVisitor: public ObjectVisitor {
   public:
    explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Visit all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->MarkRootObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };

  bool search_for_any_global_;
  Object* search_target_;
  bool found_target_;
  List<Object*> object_stack_;
  Heap* heap_;

  friend class Heap;
};

#endif

bool Heap::Setup(bool create_heap_objects) {
#ifdef DEBUG
  debug_utils_ = new HeapDebugUtils(this);
#endif

  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (e.g., through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set, or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!configured_) {
    if (!ConfigureHeapDefault()) return false;
  }

  gc_initializer_mutex->Lock();
  static bool initialized_gc = false;
  if (!initialized_gc) {
    initialized_gc = true;
    InitializeScavengingVisitorsTables();
    NewSpaceScavenger::Initialize();
    MarkCompactCollector::Initialize();
  }
  gc_initializer_mutex->Unlock();

  MarkMapPointersAsEncoded(false);

  // Setup memory allocator and reserve a chunk of memory for new
  // space. The chunk is double the size of the requested reserved
  // new space size to ensure that we can find a pair of semispaces that
  // are contiguous and aligned to their size.
  if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
      return false;
  void* chunk =
      isolate_->memory_allocator()->ReserveInitialChunk(
          4 * reserved_semispace_size_);
  if (chunk == NULL) return false;

  // Align the pair of semispaces to their size, which must be a power
  // of 2.
  Address new_space_start =
      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
    return false;
  }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_POINTER_SPACE,
                   NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->Setup(NULL, 0)) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_DATA_SPACE,
                   NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->Setup(NULL, 0)) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!isolate_->code_range()->Setup(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->Setup(NULL, 0)) return false;

  // Initialize map space.
  map_space_ = new MapSpace(this, FLAG_use_big_map_space
      ? max_old_generation_size_
      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
      FLAG_max_map_space_pages,
      MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->Setup(NULL, 0)) return false;

  // Initialize global property cell space.
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->Setup(NULL, 0)) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->Setup()) return false;

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects
    if (!CreateInitialObjects()) return false;

    global_contexts_list_ = undefined_value();
  }

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

  return true;
}
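
// Usage sketch (illustrative only; embedders normally configure through
// the public V8 API rather than calling these methods directly).
// Configuration must happen before Setup(), otherwise ConfigureHeapDefault
// supplies the flag-derived values:
//
//   Heap* heap = isolate->heap();
//   heap->ConfigureHeap(512 * KB,   // max semispace size
//                       64 * MB,    // max old generation size
//                       64 * MB);   // max executable size
//   if (!heap->Setup(true)) return false;  // hypothetical error handling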


void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
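
// Worked example (illustrative address): with kSmiTag == 0 and
// kSmiTagMask == 1, a stack limit of 0x7fff5f3b becomes
// (0x7fff5f3b & ~1) | 0 == 0x7fff5f3a. Clearing the low bit makes the
// value look like a tagged Smi, so the GC skips the entry instead of
// treating the raw address as a heap pointer.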


void Heap::TearDown() {
  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("mark_compact_count=%d ", mc_count_);
    PrintF("max_gc_pause=%d ", get_max_gc_pause());
    PrintF("min_in_mutator=%d ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("\n\n");
  }

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  isolate_->memory_allocator()->TearDown();

#ifdef DEBUG
  delete debug_utils_;
  debug_utils_ = NULL;
#endif
}


void Heap::Shrink() {
  // Try to shrink all paged spaces.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->Shrink();
}


void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}
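
// Usage sketch (illustrative; embedders normally register callbacks via
// the public v8::V8::AddGCPrologueCallback API, which forwards here).
// The callback name and body are assumptions made for the example:
//
//   static void OnMarkSweepStart(GCType type, GCCallbackFlags flags) {
//     // Runs before every mark-sweep/mark-compact collection.
//   }
//
//   heap->AddGCPrologueCallback(OnMarkSweepStart, kGCTypeMarkSweepCompact);
//   ...
//   heap->RemoveGCPrologueCallback(OnMarkSweepStart);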


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif


Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return HEAP->new_space();
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    case LO_SPACE:
      return HEAP->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator()
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class FreeListNodesFilter : public HeapObjectsFilter {
 public:
  FreeListNodesFilter() {
    MarkFreeListNodes();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  void MarkFreeListNodes() {
    Heap* heap = HEAP;
    heap->old_pointer_space()->MarkFreeListNodes();
    heap->old_data_space()->MarkFreeListNodes();
    MarkCodeSpaceFreeListNodes(heap);
    heap->map_space()->MarkFreeListNodes();
    heap->cell_space()->MarkFreeListNodes();
  }

  void MarkCodeSpaceFreeListNodes(Heap* heap) {
    // For code space, using FreeListNode::IsFreeListNode is OK.
    HeapObjectIterator iter(heap->code_space());
    for (HeapObject* obj = iter.next_object();
         obj != NULL;
         obj = iter.next_object()) {
      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
    }
  }

  AssertNoAllocation no_alloc;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkUnreachableObjects();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  class UnmarkingVisitor : public ObjectVisitor {
   public:
    UnmarkingVisitor() : list_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        if (obj->IsMarked()) {
          obj->ClearMark();
          list_.Add(obj);
        }
      }
    }

    bool can_process() { return !list_.is_empty(); }

    void ProcessNext() {
      HeapObject* obj = list_.RemoveLast();
      obj->Iterate(this);
    }

   private:
    List<HeapObject*> list_;
  };

  void MarkUnreachableObjects() {
    HeapIterator iterator;
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      obj->SetMark();
    }
    UnmarkingVisitor visitor;
    HEAP->IterateRoots(&visitor, VISIT_ALL);
    while (visitor.can_process())
      visitor.ProcessNext();
  }

  AssertNoAllocation no_alloc;
};


HeapIterator::HeapIterator()
    : filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
    : filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
  switch (filtering_) {
    case kFilterFreeListNodes:
      filter_ = new FreeListNodesFilter;
      break;
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}
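
// Usage sketch (mirrors the loop in Heap::RecordStats above): walk every
// heap object, optionally filtering. Allocation must not happen while the
// iterator is live.
//
//   HeapIterator iterator(HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // ... inspect obj ...
//   }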


#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)

Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);

class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  object_stack_.Clear();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_global_context = obj->IsGlobalContext();

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Context's properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
#ifdef OBJECT_PRINT
      obj->Print();
#else
      obj->ShortPrint();
#endif
    }
    PrintF("=====================================\n");
  }
}
#endif  // DEBUG || LIVE_OBJECT_LIST


#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif


static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}


GCTracer::GCTracer(Heap* heap)
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      heap_(heap) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
  previous_marked_count_ =
      heap_->mark_compact_collector_.previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = heap_->SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }
}


GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("%s",
               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}
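
// Sample output (illustrative values, assembled from the PrintF calls
// above). With --trace-gc:
//
//   Scavenge 12.3 -> 8.1 MB, 4 ms.
//
// With --trace-gc-nvp, one name=value line per collection instead, e.g.:
//
//   pause=4 mutator=120 gc=s external=0 mark=0 sweep=0 sweepns=0 compact=0
//     ... allocated=4194304 promoted=524288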


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
                                                           : "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return kNotFound;
}
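
// Usage sketch (illustrative): the cache memoizes (map, symbol) -> field
// offset. A miss returns kNotFound; Update() below installs the mapping
// (symbols only), after which the same probe hits.
//
//   int offset = isolate->keyed_lookup_cache()->Lookup(map, name);
//   if (offset == KeyedLookupCache::kNotFound) {
//     offset = ...;  // slow path: full property lookup (hypothetical)
//     isolate->keyed_lookup_cache()->Update(map, name, offset);
//   }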


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (HEAP->LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Type t)
    : type_(t),
      isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


} } // namespace v8::internal